CUBRID Engine — heap_file.c (heap file manager).
This is a generated source listing of heap_file.c; refer to the repository for the buildable file and its full documentation.
1 /*
2  * Copyright (C) 2008 Search Solution Corporation. All rights reserved by Search Solution.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  *
18  */
19 
20 /*
21  * heap_file.c - heap file manager
22  */
23 
24 #ident "$Id$"
25 
26 #if !defined(WINDOWS)
27 #define __STDC_FORMAT_MACROS
28 #include <inttypes.h>
29 #endif
30 
31 #include "config.h"
32 
33 #include <stdio.h>
34 #include <string.h>
35 #include <errno.h>
36 
37 #include "heap_file.h"
38 
39 #include "porting.h"
40 #include "slotted_page.h"
41 #include "overflow_file.h"
42 #include "boot_sr.h"
43 #include "locator_sr.h"
44 #include "btree.h"
45 #include "transform.h" /* for CT_SERIAL_NAME */
46 #include "serial.h"
47 #include "object_primitive.h"
48 #include "xserver_interface.h"
49 #include "chartype.h"
50 #include "query_executor.h"
51 #include "fetch.h"
52 #include "server_interface.h"
53 #include "db_elo.h"
54 #include "string_opfunc.h"
55 #include "xasl.h"
56 #include "stream_to_xasl.h"
57 #include "query_opfunc.h"
58 #include "set_object.h"
59 #if defined(ENABLE_SYSTEMTAP)
60 #include "probes.h"
61 #endif /* ENABLE_SYSTEMTAP */
62 #include "dbtype.h"
63 #include "thread_manager.hpp" // for thread_get_thread_entry_info
64 
#if !defined(SERVER_MODE)
/* Stand-alone (single-threaded) build: compile the pthread mutex calls away.
 * lock/trylock expand to 0 so call sites of the form "rv = pthread_mutex_lock (...)"
 * still parse; the file-scope 'rv' below is the sink for those dummy values. */
#define pthread_mutex_init(a, b)
#define pthread_mutex_destroy(a)
#define pthread_mutex_lock(a) 0
#define pthread_mutex_trylock(a) 0
#define pthread_mutex_unlock(a)
static int rv;
#endif /* not SERVER_MODE */
73 
/* Ratio threshold below which the best-space hints are considered stale and
 * re-synchronized (see heap_stats_sync_bestspace). */
#define HEAP_BESTSPACE_SYNC_THRESHOLD (0.1f)

/* ATTRIBUTE LOCATION */

/* Byte offset of the fixed-width attribute area inside a disk object 'obj':
 * object header plus the variable-offset table sized for 'nvars' entries. */
#define OR_FIXED_ATTRIBUTES_OFFSET_BY_OBJ(obj, nvars) \
 (OR_HEADER_SIZE(obj) + OR_VAR_TABLE_SIZE_INTERNAL(nvars, OR_GET_OFFSET_SIZE(obj)))

/* Initial sizing guesses for attribute-related allocations. */
#define HEAP_GUESS_NUM_ATTRS_REFOIDS 100
#define HEAP_GUESS_NUM_INDEXED_ATTRS 100

/* Capacity of the class-representation cache. */
#define HEAP_CLASSREPR_MAXCACHE 1024

/* Sizing hints for the best-space statistics hash table and free list. */
#define HEAP_STATS_ENTRY_MHT_EST_SIZE 1000
#define HEAP_STATS_ENTRY_FREELIST_SIZE 1000

/* A good space to accept insertions */
#define HEAP_DROP_FREE_SPACE (int)(DB_PAGESIZE * 0.3)

/* Debug pattern; presumably stamped into scan caches to detect uninitialized
 * use — NOTE(review): confirm against the scancache debug checks. */
#define HEAP_DEBUG_SCANCACHE_INITPATTERN (12345)
93 
/* Scan-range sanity check: performs real validation only in CUBRID_DEBUG
 * builds; otherwise it folds to the constant DISK_VALID. */
#if defined(CUBRID_DEBUG)
#define HEAP_DEBUG_ISVALID_SCANRANGE(scan_range) \
 heap_scanrange_isvalid(scan_range)
#else /* CUBRID_DEBUG */
#define HEAP_DEBUG_ISVALID_SCANRANGE(scan_range) (DISK_VALID)
#endif /* !CUBRID_DEBUG */
100 
/* True iff 'pgptr' is a fixed page and is the page addressed by 'oid'
 * (both volume id and page id match). 'thread_p' is accepted for interface
 * symmetry but not used by the expansion. */
#define HEAP_IS_PAGE_OF_OID(thread_p, pgptr, oid) \
 (((pgptr) != NULL) \
 && pgbuf_get_volume_id (pgptr) == (oid)->volid \
 && pgbuf_get_page_id (pgptr) == (oid)->pageid)
105 
/* Fill an MVCC delete-info record: the deleter's MVCCID ('row_delete_id')
 * and the result of the satisfies-delete visibility test. The destination
 * pointer must be non-NULL (asserted). */
#define MVCC_SET_DELETE_INFO(mvcc_delete_info_p, row_delete_id, \
 satisfies_del_result) \
 do \
 { \
 assert ((mvcc_delete_info_p) != NULL); \
 (mvcc_delete_info_p)->row_delid = (row_delete_id); \
 (mvcc_delete_info_p)->satisfies_delete_result = (satisfies_del_result); \
 } \
 while (0)
115 
/* Force an MVCC record header to carry all optional fields: any flag not yet
 * set is turned on and its field filled with a neutral value — insert id
 * MVCCID_ALL_VISIBLE, delete id MVCCID_NULL, previous-version LSA null.
 * NOTE(review): presumably used so the header occupies its maximum on-disk
 * size and later updates cannot grow it in place — confirm at call sites. */
#define HEAP_MVCC_SET_HEADER_MAXIMUM_SIZE(mvcc_rec_header_p) \
 do \
 { \
 if (!MVCC_IS_FLAG_SET (mvcc_rec_header_p, OR_MVCC_FLAG_VALID_INSID)) \
 { \
 MVCC_SET_FLAG_BITS (mvcc_rec_header_p, OR_MVCC_FLAG_VALID_INSID); \
 MVCC_SET_INSID (mvcc_rec_header_p, MVCCID_ALL_VISIBLE); \
 } \
 if (!MVCC_IS_FLAG_SET (mvcc_rec_header_p, OR_MVCC_FLAG_VALID_DELID)) \
 { \
 MVCC_SET_FLAG_BITS (mvcc_rec_header_p, OR_MVCC_FLAG_VALID_DELID); \
 MVCC_SET_DELID (mvcc_rec_header_p, MVCCID_NULL); \
 } \
 if (!MVCC_IS_FLAG_SET (mvcc_rec_header_p, OR_MVCC_FLAG_VALID_PREV_VERSION)) \
 { \
 MVCC_SET_FLAG_BITS (mvcc_rec_header_p, OR_MVCC_FLAG_VALID_PREV_VERSION); \
 LSA_SET_NULL(&(mvcc_rec_header_p)->prev_version_lsa); \
 } \
 } \
 while (0)
136 
/* An update runs as an MVCC operation only in SERVER_MODE, for an MVCC class,
 * and when the caller did not request an in-place update style. In SA builds
 * it is constant false. */
#if defined (SERVER_MODE)
#define HEAP_UPDATE_IS_MVCC_OP(is_mvcc_class, update_style) \
 ((is_mvcc_class) && (!HEAP_IS_UPDATE_INPLACE (update_style)) ? (true) : (false))
#else
#define HEAP_UPDATE_IS_MVCC_OP(is_mvcc_class, update_style) (false)
#endif

/* HFID to use for ordered page fixing during a heap scan; the null ordered
 * HFID when no scan context is available. */
#define HEAP_SCAN_ORDERED_HFID(scan) \
 (((scan) != NULL) ? (&(scan)->node.hfid) : (PGBUF_ORDERED_NULL_HFID))
146 
147 typedef enum
148 {
153 
154 /*
155  * Prefetching directions
156  */
157 
158 typedef enum
159 {
160  HEAP_DIRECTION_NONE, /* No prefetching */
161  HEAP_DIRECTION_LEFT, /* Prefetching at the left */
162  HEAP_DIRECTION_RIGHT, /* Prefetching at the right */
163  HEAP_DIRECTION_BOTH /* Prefetching at both directions.. left and right */
165 
166 /*
167  * Heap file header
168  */
169 
/* Number of entries in the best-space statistics array kept in the heap
 * file header. */
#define HEAP_NUM_BEST_SPACESTATS 10

/* Circular successor of index 'i' within the best array. */
#define HEAP_STATS_NEXT_BEST_INDEX(i) \
 (((i) + 1) % HEAP_NUM_BEST_SPACESTATS)
/* Circular predecessor of index 'i' within the best array.
 * Fix: removed the stray trailing ';' from the expansion so the macro is a
 * plain expression usable in any context (argument position, right-hand side
 * without double semicolons), consistent with HEAP_STATS_NEXT_BEST_INDEX. */
#define HEAP_STATS_PREV_BEST_INDEX(i) \
 (((i) == 0) ? (HEAP_NUM_BEST_SPACESTATS - 1) : ((i) - 1))
177 
180 {
181  /* the first must be class_oid */
183  VFID ovf_vfid; /* Overflow file identifier (if any) */
184  VPID next_vpid; /* Next page (i.e., the 2nd page of heap file) */
185  int unfill_space; /* Stop inserting when page has run below this. leave it for updates */
186  struct
187  {
188  int num_pages; /* Estimation of number of heap pages. Consult file manager if accurate number is
189  * needed */
190  int num_recs; /* Estimation of number of objects in heap */
191  float recs_sumlen; /* Estimation total length of records */
192  int num_other_high_best; /* Total of other believed known best pages, which are not included in the best array
193  * and we believe they have at least HEAP_DROP_FREE_SPACE */
194  int num_high_best; /* Number of pages in the best array that we believe have at least
195  * HEAP_DROP_FREE_SPACE. When this number goes to zero and there is at least other
196  * HEAP_NUM_BEST_SPACESTATS best pages, we look for them. */
197  int num_substitutions; /* Number of page substitutions. This will be used to insert a new second best page
198  * into second best hints. */
199  int num_second_best; /* Number of second best hints. The hints are in "second_best" array. They are used
200  * when finding new best pages. See the function "heap_stats_sync_bestspace". */
201  int head_second_best; /* Index of head of second best hints. */
202  int tail_second_best; /* Index of tail of second best hints. A new second best hint will be stored on this
203  * index. */
204  int head; /* Head of best circular array */
205  VPID last_vpid; /* todo: move out of estimates */
209  } estimates; /* Probably, the set of pages with more free space on the heap. Changes to any values
210  * of this array (either page or the free space for the page) are not logged since
211  * these values are only used for hints. These values may not be accurate at any given
212  * time and the entries may contain duplicated pages. */
213 
214  int reserve0_for_future; /* Nothing reserved for future */
215  int reserve1_for_future; /* Nothing reserved for future */
216  int reserve2_for_future; /* Nothing reserved for future */
217 };
218 
221 {
222  HFID hfid; /* heap file identifier */
223  HEAP_BESTSPACE best; /* best space info */
225 };
226 
/* Define heap page flags. */
/* The top two bits of the heap chain 'flags' field encode the page's vacuum
 * status: 00 = none, bit 31 = vacuum once, bit 30 = status unknown
 * (see HEAP_PAGE_SET/GET_VACUUM_STATUS below). */
#define HEAP_PAGE_FLAG_VACUUM_STATUS_MASK 0xC0000000
#define HEAP_PAGE_FLAG_VACUUM_ONCE 0x80000000
#define HEAP_PAGE_FLAG_VACUUM_UNKNOWN 0x40000000
231 
/* Store vacuum status 'status' into (chain)->flags: clear both status bits,
 * then set the bit matching the requested state (none leaves both clear).
 * Only the three HEAP_PAGE_VACUUM_* states are legal (asserted). */
#define HEAP_PAGE_SET_VACUUM_STATUS(chain, status) \
 do \
 { \
 assert ((status) == HEAP_PAGE_VACUUM_NONE \
 || (status) == HEAP_PAGE_VACUUM_ONCE \
 || (status) == HEAP_PAGE_VACUUM_UNKNOWN); \
 (chain)->flags &= ~HEAP_PAGE_FLAG_VACUUM_STATUS_MASK; \
 if ((status) == HEAP_PAGE_VACUUM_ONCE) \
 { \
 (chain)->flags |= HEAP_PAGE_FLAG_VACUUM_ONCE; \
 } \
 else if ((status) == HEAP_PAGE_VACUUM_UNKNOWN) \
 { \
 (chain)->flags |= HEAP_PAGE_FLAG_VACUUM_UNKNOWN; \
 } \
 } \
 while (false)
249 
/* Decode the vacuum status stored in (chain)->flags; exact inverse of
 * HEAP_PAGE_SET_VACUUM_STATUS (clear bits -> NONE, ONCE bit -> ONCE,
 * anything else -> UNKNOWN). */
#define HEAP_PAGE_GET_VACUUM_STATUS(chain) \
 (((chain)->flags & HEAP_PAGE_FLAG_VACUUM_STATUS_MASK) == 0 \
 ? HEAP_PAGE_VACUUM_NONE \
 : ((((chain)->flags & HEAP_PAGE_FLAG_VACUUM_STATUS_MASK) \
 == HEAP_PAGE_FLAG_VACUUM_ONCE) \
 ? HEAP_PAGE_VACUUM_ONCE : HEAP_PAGE_VACUUM_UNKNOWN))
256 
257 typedef struct heap_chain HEAP_CHAIN;
259 { /* Double-linked */
260  /* the first must be class_oid */
262  VPID prev_vpid; /* Previous page */
263  VPID next_vpid; /* Next page */
264  MVCCID max_mvccid; /* Max MVCCID of any MVCC operations in page. */
265  INT32 flags; /* Flags for heap page. 2 bits are used for vacuum state. */
266 };
267 
/* Growth increment for the unfound-relocation OID array used by the heap
 * consistency check (see unfound_reloc_oids below). */
#define HEAP_CHK_ADD_UNFOUND_RELOCOIDS 100
269 
272 {
275 };
276 
279 {
280  MHT_TABLE *ht; /* Hash table to be used to keep relocated records The key of hash table is the
281  * relocation OID, the date is the real OID */
282  bool verify;
283  bool verify_not_vacuumed; /* if true then each record will be checked if it wasn't vacuumed although it must've
284  * be vacuumed */
285  DISK_ISVALID not_vacuumed_res; /* The validation result of the "not vacuumed" objects */
288  OID *unfound_reloc_oids; /* The relocation OIDs that have not been found in hash table */
289 };
290 
/* Growth increment for a class's representation array. */
#define DEFAULT_REPR_INCREMENT 16

/* Placement of a class-representation cache entry: detached (VOID), on the
 * free list (FREE), or on the LRU list (LRU); stored in the 'zone' field. */
enum
{ ZONE_VOID = 1, ZONE_FREE = 2, ZONE_LRU = 3 };
295 
298 {
299  pthread_mutex_t mutex;
300  int idx; /* Cache index. Used to pass the index when a class representation is in the cache */
301  int fcnt; /* How many times this structure has been fixed. It cannot be deallocated until this
302  * value is zero. */
303  int zone; /* ZONE_VOID, ZONE_LRU, ZONE_FREE */
305 
308  HEAP_CLASSREPR_ENTRY *prev; /* prev. entry in LRU list */
309  HEAP_CLASSREPR_ENTRY *next; /* prev. entry in LRU or free list */
310 
311  /* real data */
312  OID class_oid; /* Identifier of the class representation */
313 
314  OR_CLASSREP **repr; /* A particular representation of the class */
317 };
318 
321 {
325 };
326 
329 {
330  pthread_mutex_t hash_mutex;
331  int idx;
334 };
335 
338 {
339  pthread_mutex_t LRU_mutex;
342 };
343 
346 {
347  pthread_mutex_t free_mutex;
349  int free_cnt;
350 };
351 
354 {
357  int num_hash;
363 #ifdef DEBUG_CLASSREPR_CACHE
364  int num_fix_entries;
365  pthread_mutex_t num_fix_entries_mutex;
366 #endif /* DEBUG_CLASSREPR_CACHE */
367 };
368 
369 static HEAP_CLASSREPR_CACHE heap_Classrepr_cache = {
370  -1,
371  NULL,
372  -1,
373  NULL,
374  NULL,
375  {
376  PTHREAD_MUTEX_INITIALIZER,
377  NULL,
378  NULL},
379  {
380  PTHREAD_MUTEX_INITIALIZER,
381  NULL,
382  -1},
383  {{NULL_FILEID, NULL_VOLID}, NULL_PAGEID} /* rootclass_hfid */
384 #ifdef DEBUG_CLASSREPR_CACHE
385  , 0, PTHREAD_MUTEX_INITIALIZER
386 #endif /* DEBUG_CLASSREPR_CACHE */
387 };
388 
/* Growth increment for a cache entry's per-representation array. */
#define CLASSREPR_REPR_INCREMENT 10
/* Number of hash anchors: twice the cache's entry count. */
#define CLASSREPR_HASH_SIZE (heap_Classrepr_cache.num_entries * 2)
/* Map a class OID to its hash-anchor index. */
#define REPR_HASH(class_oid) (OID_PSEUDO_KEY(class_oid)%CLASSREPR_HASH_SIZE)
392 
/* If 'hfid' addresses the root-class heap, a class object may just have been
 * modified: decache the guessed last representation of 'class_oid'. The
 * root-class HFID is looked up lazily on first use. No-op when the
 * classrepr cache is uninitialized or 'hfid' is NULL.
 * Fix: braced the single-statement 'if' bodies inside the macro — unbraced
 * bodies in a multi-statement macro are a classic maintenance hazard
 * (goto-fail). Expansion behavior is unchanged. */
#define HEAP_MAYNEED_DECACHE_GUESSED_LASTREPRS(class_oid, hfid) \
 do \
 { \
 if (heap_Classrepr != NULL && (hfid) != NULL) \
 { \
 if (HFID_IS_NULL (&(heap_Classrepr->rootclass_hfid))) \
 { \
 (void) boot_find_root_heap (&(heap_Classrepr->rootclass_hfid)); \
 } \
 if (HFID_EQ ((hfid), &(heap_Classrepr->rootclass_hfid))) \
 { \
 (void) heap_classrepr_decache_guessed_last (class_oid); \
 } \
 } \
 } \
 while (0)
405 
/* Bit-array helpers (presumably for the chn-guess per-client bitmaps — see
 * HEAP_CHNGUESS_ENTRY 'bits'). Bits are numbered from 0; byte k holds bits
 * 8k..8k+7. */
#define HEAP_CHNGUESS_FUDGE_MININDICES (100)
#define HEAP_NBITS_IN_BYTE (8)
#define HEAP_NSHIFTS (3) /* For multiplication/division by 8 */
#define HEAP_BITMASK (HEAP_NBITS_IN_BYTE - 1)
/* Bytes needed to hold 'bit_cnt' bits, rounded up. */
#define HEAP_NBITS_TO_NBYTES(bit_cnt) \
 ((unsigned int)((bit_cnt) + HEAP_BITMASK) >> HEAP_NSHIFTS)
/* Bits held by 'byte_cnt' bytes. */
#define HEAP_NBYTES_TO_NBITS(byte_cnt) ((unsigned int)(byte_cnt) << HEAP_NSHIFTS)
/* Zero-fill 'byte_cnt' bytes at 'byte_ptr'. */
#define HEAP_NBYTES_CLEARED(byte_ptr, byte_cnt) \
 memset((byte_ptr), '\0', (byte_cnt))
/* Index of the byte containing bit 'bit_num'. */
#define HEAP_BYTEOFFSET_OFBIT(bit_num) ((unsigned int)(bit_num) >> HEAP_NSHIFTS)
/* Address of the byte containing bit 'bit_num'. */
#define HEAP_BYTEGET(byte_ptr, bit_num) \
 ((unsigned char *)(byte_ptr) + HEAP_BYTEOFFSET_OFBIT(bit_num))

/* In-byte mask selecting bit 'bit_num'. */
#define HEAP_BITMASK_INBYTE(bit_num) \
 (1 << ((unsigned int)(bit_num) & HEAP_BITMASK))
/* Test / set / clear bit 'bit_num' of the array at 'byte_ptr'. */
#define HEAP_BIT_GET(byte_ptr, bit_num) \
 (*HEAP_BYTEGET(byte_ptr, bit_num) & HEAP_BITMASK_INBYTE(bit_num))
#define HEAP_BIT_SET(byte_ptr, bit_num) \
 (*HEAP_BYTEGET(byte_ptr, bit_num) = \
 *HEAP_BYTEGET(byte_ptr, bit_num) | HEAP_BITMASK_INBYTE(bit_num))
#define HEAP_BIT_CLEAR(byte_ptr, bit_num) \
 (*HEAP_BYTEGET(byte_ptr, bit_num) = \
 *HEAP_BYTEGET(byte_ptr, bit_num) & ~HEAP_BITMASK_INBYTE(bit_num))
429 
432 { /* Currently, only classes are cached */
433  int idx; /* Index number of this entry */
434  int chn; /* Cache coherence number of object */
435  bool recently_accessed; /* Reference value 0/1 used by replacement clock algorithm */
436  OID oid; /* Identifier of object */
437  unsigned char *bits; /* Bit index array describing client transaction indices. Bit n corresponds to client
438  * tran index n If Bit is ON, we guess that the object is cached in the workspace of
439  * the client. */
440 };
441 
444 {
445  MHT_TABLE *ht; /* Hash table for guessing chn */
446  HEAP_CHNGUESS_ENTRY *entries; /* Pointers to entry structures. More than one entry */
447  unsigned char *bitindex; /* Bit index array for each entry. Describe all entries. Each entry is subdivided into
448  * nbytes. */
449  bool schema_change; /* Has the schema been changed */
450  int clock_hand; /* Clock hand for replacement */
451  int num_entries; /* Number of guesschn entries */
452  int num_clients; /* Number of clients in bitindex for each entry */
453  int nbytes; /* Number of bytes in bitindex. It must be aligned to multiples of 4 bytes (integers) */
454 };
455 
458 {
459  int num_stats_entries; /* number of cache entries in use */
460  MHT_TABLE *hfid_ht; /* HFID Hash table for best space */
461  MHT_TABLE *vpid_ht; /* VPID Hash table for best space */
463  int num_free;
464  int free_list_count; /* number of entries in free */
466  pthread_mutex_t bestspace_mutex;
467 };
468 
471 {
472  HFID *hfids; /* Array of class HFID */
473  int hfids_count; /* Count of above hfids array */
474 };
475 
/* Largest record length storable in a slotted heap page.
 * NOTE(review): initialized outside this chunk — confirm where it is set. */
static int heap_Maxslotted_reclength;
/* Per-record slot overhead in a slotted page. */
static int heap_Slotted_overhead = 4; /* sizeof (SPAGE_SLOT) */
/* Upper bound on pages examined while hunting for one with enough space. */
static const int heap_Find_best_page_limit = 100;

/* Active class-representation cache; NULL until initialized (presumably by
 * heap_classrepr_initialize_cache — declared below). */
static HEAP_CLASSREPR_CACHE *heap_Classrepr = NULL;
/* Storage for the cache-coherency (chn) guessing area; all fields start
 * zero/NULL, i.e. guessing is not yet active. */
static HEAP_CHNGUESS heap_Guesschn_area = { NULL, NULL, NULL, false, 0,
 0, 0, 0
};
484 
/* Points at heap_Guesschn_area once chn guessing is enabled; NULL otherwise.
 * NOTE(review): the enabling code is outside this chunk. */
static HEAP_CHNGUESS *heap_Guesschn = NULL;

/* Storage for the best-space statistics cache and its active pointer
 * (NULL until heap_stats_bestspace_initialize-style setup runs). */
static HEAP_STATS_BESTSPACE_CACHE heap_Bestspace_cache_area =
 { 0, NULL, NULL, 0, 0, 0, NULL, PTHREAD_MUTEX_INITIALIZER };

static HEAP_STATS_BESTSPACE_CACHE *heap_Bestspace = NULL;
491 
494 };
495 
496 static HEAP_HFID_TABLE *heap_Hfid_table = NULL;
497 
/* Recovery. */
/* Flag carried in recovery record data: this operation also changed the
 * page's vacuum status bits. */
#define HEAP_RV_FLAG_VACUUM_STATUS_CHANGE 0x8000

/* Performance trackers: start a timer on the operation context, then charge
 * the elapsed time of the prepare / execute / logging phase of an insert,
 * delete or update to the matching PSTAT_* counter and restart the timer.
 * All three TRACK macros are no-ops when the context has no time tracker. */
#define HEAP_PERF_START(thread_p, context) \
 PERF_UTIME_TRACKER_START (thread_p, (context)->time_track)
#define HEAP_PERF_TRACK_PREPARE(thread_p, context) \
 do \
 { \
 if ((context)->time_track == NULL) break; \
 switch ((context)->type) { \
 case HEAP_OPERATION_INSERT: \
 PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_INSERT_PREPARE); \
 break; \
 case HEAP_OPERATION_DELETE: \
 PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_DELETE_PREPARE); \
 break; \
 case HEAP_OPERATION_UPDATE: \
 PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_UPDATE_PREPARE); \
 break; \
 default: \
 assert (false); \
 } \
 } \
 while (false)
#define HEAP_PERF_TRACK_EXECUTE(thread_p, context) \
 do \
 { \
 if ((context)->time_track == NULL) break; \
 switch ((context)->type) { \
 case HEAP_OPERATION_INSERT: \
 PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, \
 (context)->time_track,\
 PSTAT_HEAP_INSERT_EXECUTE); \
 break; \
 case HEAP_OPERATION_DELETE: \
 PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_DELETE_EXECUTE); \
 break; \
 case HEAP_OPERATION_UPDATE: \
 PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_UPDATE_EXECUTE); \
 break; \
 default: \
 assert (false); \
 } \
 } \
 while (false)
#define HEAP_PERF_TRACK_LOGGING(thread_p, context) \
 do \
 { \
 if ((context)->time_track == NULL) break; \
 switch ((context)->type) { \
 case HEAP_OPERATION_INSERT: \
 PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_INSERT_LOG); \
 break; \
 case HEAP_OPERATION_DELETE: \
 PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_DELETE_LOG); \
 break; \
 case HEAP_OPERATION_UPDATE: \
 PERF_UTIME_TRACKER_ADD_TIME_AND_RESTART (thread_p, (context)->time_track, PSTAT_HEAP_UPDATE_LOG); \
 break; \
 default: \
 assert (false); \
 } \
 } \
 while (false)
562 
563 #if defined (NDEBUG)
565  LOCK lock, HEAP_SCANCACHE * scan_cache, PGBUF_WATCHER * pg_watcher);
566 #else /* !NDEBUG */
567 #define heap_scan_pb_lock_and_fetch(...) \
568  heap_scan_pb_lock_and_fetch_debug (__VA_ARGS__, ARG_FILE_LINE)
569 
570 static PAGE_PTR heap_scan_pb_lock_and_fetch_debug (THREAD_ENTRY * thread_p, const VPID * vpid_ptr,
571  PAGE_FETCH_MODE fetch_mode, LOCK lock, HEAP_SCANCACHE * scan_cache,
572  PGBUF_WATCHER * pg_watcher, const char *caller_file,
573  const int caller_line);
574 #endif /* !NDEBUG */
575 
576 
577 static int heap_classrepr_initialize_cache (void);
578 static int heap_classrepr_finalize_cache (void);
579 static int heap_classrepr_decache_guessed_last (const OID * class_oid);
580 #ifdef SERVER_MODE
581 static int heap_classrepr_lock_class (THREAD_ENTRY * thread_p, HEAP_CLASSREPR_HASH * hash_anchor,
582  const OID * class_oid);
583 static int heap_classrepr_unlock_class (HEAP_CLASSREPR_HASH * hash_anchor, const OID * class_oid, int need_hash_mutex);
584 #endif
585 
586 static int heap_classrepr_dump (THREAD_ENTRY * thread_p, FILE * fp, const OID * class_oid, const OR_CLASSREP * repr);
587 #ifdef DEBUG_CLASSREPR_CACHE
588 static int heap_classrepr_dump_cache (bool simple_dump);
589 #endif /* DEBUG_CLASSREPR_CACHE */
590 
591 static int heap_classrepr_entry_reset (HEAP_CLASSREPR_ENTRY * cache_entry);
592 static int heap_classrepr_entry_remove_from_LRU (HEAP_CLASSREPR_ENTRY * cache_entry);
593 static HEAP_CLASSREPR_ENTRY *heap_classrepr_entry_alloc (void);
594 static int heap_classrepr_entry_free (HEAP_CLASSREPR_ENTRY * cache_entry);
595 
596 static OR_CLASSREP *heap_classrepr_get_from_record (THREAD_ENTRY * thread_p, REPR_ID * last_reprid,
597  const OID * class_oid, RECDES * class_recdes, REPR_ID reprid);
598 static int heap_stats_get_min_freespace (HEAP_HDR_STATS * heap_hdr);
599 static int heap_stats_update_internal (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * lotspace_vpid,
600  int free_space);
601 static void heap_stats_put_second_best (HEAP_HDR_STATS * heap_hdr, VPID * vpid);
602 static int heap_stats_get_second_best (HEAP_HDR_STATS * heap_hdr, VPID * vpid);
603 #if defined(ENABLE_UNUSED_FUNCTION)
604 static int heap_stats_quick_num_fit_in_bestspace (HEAP_BESTSPACE * bestspace, int num_entries, int unit_size,
605  int unfill_space);
606 #endif
607 static HEAP_FINDSPACE heap_stats_find_page_in_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid,
608  HEAP_BESTSPACE * bestspace, int *idx_badspace,
609  int record_length, int needed_space,
610  HEAP_SCANCACHE * scan_cache, PGBUF_WATCHER * pg_watcher);
611 static PAGE_PTR heap_stats_find_best_page (THREAD_ENTRY * thread_p, const HFID * hfid, int needed_space, bool isnew_rec,
612  int newrec_size, HEAP_SCANCACHE * space_cache, PGBUF_WATCHER * pg_watcher);
613 static int heap_stats_sync_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_STATS * heap_hdr,
614  VPID * hdr_vpid, bool scan_all, bool can_cycle);
615 
616 static int heap_get_last_page (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_STATS * heap_hdr,
617  HEAP_SCANCACHE * scan_cache, VPID * last_vpid, PGBUF_WATCHER * pg_watcher);
618 
619 static int heap_vpid_init_new (THREAD_ENTRY * thread_p, PAGE_PTR page, void *args);
620 static int heap_vpid_alloc (THREAD_ENTRY * thread_p, const HFID * hfid, PAGE_PTR hdr_pgptr, HEAP_HDR_STATS * heap_hdr,
621  HEAP_SCANCACHE * scan_cache, PGBUF_WATCHER * new_pg_watcher);
622 static VPID *heap_vpid_remove (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_STATS * heap_hdr, VPID * rm_vpid);
623 
624 static int heap_create_internal (THREAD_ENTRY * thread_p, HFID * hfid, const OID * class_oid, const bool reuse_oid);
625 static const HFID *heap_reuse (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * class_oid, const bool reuse_oid);
626 static bool heap_delete_all_page_records (THREAD_ENTRY * thread_p, const VPID * vpid, PAGE_PTR pgptr);
627 static int heap_reinitialize_page (THREAD_ENTRY * thread_p, PAGE_PTR pgptr, const bool is_header_page);
628 #if defined(CUBRID_DEBUG)
629 static DISK_ISVALID heap_hfid_isvalid (HFID * hfid);
630 static DISK_ISVALID heap_scanrange_isvalid (HEAP_SCANRANGE * scan_range);
631 #endif /* CUBRID_DEBUG */
632 static OID *heap_ovf_insert (THREAD_ENTRY * thread_p, const HFID * hfid, OID * ovf_oid, RECDES * recdes);
633 static const OID *heap_ovf_update (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * ovf_oid, RECDES * recdes);
634 static int heap_ovf_flush (THREAD_ENTRY * thread_p, const OID * ovf_oid);
635 static int heap_ovf_get_length (THREAD_ENTRY * thread_p, const OID * ovf_oid);
636 static SCAN_CODE heap_ovf_get (THREAD_ENTRY * thread_p, const OID * ovf_oid, RECDES * recdes, int chn,
638 static int heap_ovf_get_capacity (THREAD_ENTRY * thread_p, const OID * ovf_oid, int *ovf_len, int *ovf_num_pages,
639  int *ovf_overhead, int *ovf_free_space);
640 
641 static int heap_scancache_check_with_hfid (THREAD_ENTRY * thread_p, HFID * hfid, OID * class_oid,
642  HEAP_SCANCACHE ** scan_cache);
643 static int heap_scancache_start_internal (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, const HFID * hfid,
644  const OID * class_oid, int cache_last_fix_page, bool is_queryscan,
645  int is_indexscan, MVCC_SNAPSHOT * mvcc_snapshot);
646 static int heap_scancache_force_modify (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache);
647 static int heap_scancache_reset_modify (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, const HFID * hfid,
648  const OID * class_oid);
649 static int heap_scancache_quick_start_internal (HEAP_SCANCACHE * scan_cache, const HFID * hfid);
650 static int heap_scancache_quick_end (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache);
651 static int heap_scancache_end_internal (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, bool scan_state);
652 static SCAN_CODE heap_get_if_diff_chn (THREAD_ENTRY * thread_p, PAGE_PTR pgptr, INT16 slotid, RECDES * recdes,
653  bool ispeeking, int chn, MVCC_SNAPSHOT * mvcc_snapshot);
654 static int heap_estimate_avg_length (THREAD_ENTRY * thread_p, const HFID * hfid);
655 static int heap_get_capacity (THREAD_ENTRY * thread_p, const HFID * hfid, INT64 * num_recs, INT64 * num_recs_relocated,
656  INT64 * num_recs_inovf, INT64 * num_pages, int *avg_freespace, int *avg_freespace_nolast,
657  int *avg_reclength, int *avg_overhead);
658 #if 0 /* TODO: remove unused */
659 static int heap_moreattr_attrinfo (int attrid, HEAP_CACHE_ATTRINFO * attr_info);
660 #endif
661 
662 static int heap_attrinfo_recache_attrepr (HEAP_CACHE_ATTRINFO * attr_info, bool islast_reset);
663 static int heap_attrinfo_recache (THREAD_ENTRY * thread_p, REPR_ID reprid, HEAP_CACHE_ATTRINFO * attr_info);
664 static int heap_attrinfo_check (const OID * inst_oid, HEAP_CACHE_ATTRINFO * attr_info);
665 static int heap_attrinfo_set_uninitialized (THREAD_ENTRY * thread_p, OID * inst_oid, RECDES * recdes,
666  HEAP_CACHE_ATTRINFO * attr_info);
667 static int heap_attrinfo_start_refoids (THREAD_ENTRY * thread_p, OID * class_oid, HEAP_CACHE_ATTRINFO * attr_info);
668 static int heap_attrinfo_get_disksize (HEAP_CACHE_ATTRINFO * attr_info, bool is_mvcc_class, int *offset_size_ptr);
669 
670 static int heap_attrvalue_read (RECDES * recdes, HEAP_ATTRVALUE * value, HEAP_CACHE_ATTRINFO * attr_info);
671 
672 static int heap_midxkey_get_value (RECDES * recdes, OR_ATTRIBUTE * att, DB_VALUE * value,
673  HEAP_CACHE_ATTRINFO * attr_info);
674 static OR_ATTRIBUTE *heap_locate_attribute (ATTR_ID attrid, HEAP_CACHE_ATTRINFO * attr_info);
675 
676 static DB_MIDXKEY *heap_midxkey_key_get (RECDES * recdes, DB_MIDXKEY * midxkey, OR_INDEX * index,
677  HEAP_CACHE_ATTRINFO * attrinfo, DB_VALUE * func_res, TP_DOMAIN * func_domain,
678  TP_DOMAIN ** key_domain);
679 static DB_MIDXKEY *heap_midxkey_key_generate (THREAD_ENTRY * thread_p, RECDES * recdes, DB_MIDXKEY * midxkey,
680  int *att_ids, HEAP_CACHE_ATTRINFO * attrinfo, DB_VALUE * func_res,
681  int func_col_id, int func_attr_index_start);
682 
683 static int heap_dump_hdr (FILE * fp, HEAP_HDR_STATS * heap_hdr);
684 
685 static int heap_eval_function_index (THREAD_ENTRY * thread_p, FUNCTION_INDEX_INFO * func_index_info, int n_atts,
686  int *att_ids, HEAP_CACHE_ATTRINFO * attr_info, RECDES * recdes, int btid_index,
687  DB_VALUE * result, FUNC_PRED_UNPACK_INFO * func_pred, TP_DOMAIN ** fi_domain);
688 
689 static DISK_ISVALID heap_check_all_pages_by_heapchain (THREAD_ENTRY * thread_p, HFID * hfid,
690  HEAP_CHKALL_RELOCOIDS * chk_objs, INT32 * num_checked);
691 
692 #if defined (SA_MODE)
693 static DISK_ISVALID heap_check_all_pages_by_file_table (THREAD_ENTRY * thread_p, HFID * hfid,
694  HEAP_CHKALL_RELOCOIDS * chk_objs);
695 static int heap_file_map_chkreloc (THREAD_ENTRY * thread_p, PAGE_PTR * page, bool * stop, void *args);
696 #endif /* SA_MODE */
697 
698 static DISK_ISVALID heap_chkreloc_start (HEAP_CHKALL_RELOCOIDS * chk);
699 static DISK_ISVALID heap_chkreloc_end (HEAP_CHKALL_RELOCOIDS * chk);
700 static int heap_chkreloc_print_notfound (const void *ignore_reloc_oid, void *ent, void *xchk);
701 static DISK_ISVALID heap_chkreloc_next (THREAD_ENTRY * thread_p, HEAP_CHKALL_RELOCOIDS * chk, PAGE_PTR pgptr);
702 
703 static int heap_chnguess_initialize (void);
704 static int heap_chnguess_realloc (void);
705 static int heap_chnguess_finalize (void);
706 static int heap_chnguess_decache (const OID * oid);
707 static int heap_chnguess_remove_entry (const void *oid_key, void *ent, void *xignore);
708 
709 static int heap_stats_bestspace_initialize (void);
710 static int heap_stats_bestspace_finalize (void);
711 
712 static int heap_get_spage_type (void);
713 static bool heap_is_reusable_oid (const FILE_TYPE file_type);
714 
715 static SCAN_CODE heap_attrinfo_transform_to_disk_internal (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * attr_info,
716  RECDES * old_recdes, RECDES * new_recdes,
717  int lob_create_flag);
718 static int heap_stats_del_bestspace_by_vpid (THREAD_ENTRY * thread_p, VPID * vpid);
719 static int heap_stats_del_bestspace_by_hfid (THREAD_ENTRY * thread_p, const HFID * hfid);
720 static HEAP_BESTSPACE heap_stats_get_bestspace_by_vpid (THREAD_ENTRY * thread_p, VPID * vpid);
721 static HEAP_STATS_ENTRY *heap_stats_add_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * vpid,
722  int freespace);
723 static int heap_stats_entry_free (THREAD_ENTRY * thread_p, void *data, void *args);
724 static int heap_get_partitions_from_subclasses (THREAD_ENTRY * thread_p, const OID * subclasses, int *parts_count,
725  OR_PARTITION * partitions);
726 static int heap_class_get_partition_info (THREAD_ENTRY * thread_p, const OID * class_oid, OR_PARTITION * partition_info,
727  HFID * class_hfid, REPR_ID * repr_id, int *has_partition_info);
728 static int heap_get_partition_attributes (THREAD_ENTRY * thread_p, const OID * cls_oid, ATTR_ID * type_id,
729  ATTR_ID * values_id);
730 static int heap_get_class_subclasses (THREAD_ENTRY * thread_p, const OID * class_oid, int *count, OID ** subclasses);
731 
732 static unsigned int heap_hash_vpid (const void *key_vpid, unsigned int htsize);
733 static int heap_compare_vpid (const void *key_vpid1, const void *key_vpid2);
734 static unsigned int heap_hash_hfid (const void *key_hfid, unsigned int htsize);
735 static int heap_compare_hfid (const void *key_hfid1, const void *key_hfid2);
736 
737 static char *heap_bestspace_to_string (char *buf, int buf_size, const HEAP_BESTSPACE * hb);
738 
739 static int fill_string_to_buffer (char **start, char *end, const char *str);
740 
741 static SCAN_CODE heap_get_record_info (THREAD_ENTRY * thread_p, const OID oid, RECDES * recdes, RECDES forward_recdes,
742  PGBUF_WATCHER * page_watcher, HEAP_SCANCACHE * scan_cache, bool ispeeking,
743  DB_VALUE ** record_info);
744 static SCAN_CODE heap_next_internal (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * next_oid,
745  RECDES * recdes, HEAP_SCANCACHE * scan_cache, bool ispeeking,
746  bool reversed_direction, DB_VALUE ** cache_recordinfo);
747 
748 static SCAN_CODE heap_get_page_info (THREAD_ENTRY * thread_p, const OID * cls_oid, const HFID * hfid, const VPID * vpid,
749  const PAGE_PTR pgptr, DB_VALUE ** page_info);
750 static int heap_scancache_start_chain_update (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * new_scan_cache,
751  HEAP_SCANCACHE * old_scan_cache, OID * next_row_version);
752 static SCAN_CODE heap_get_bigone_content (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, bool ispeeking,
753  OID * forward_oid, RECDES * recdes);
754 static void heap_mvcc_log_insert (THREAD_ENTRY * thread_p, RECDES * p_recdes, LOG_DATA_ADDR * p_addr);
755 static void heap_mvcc_log_delete (THREAD_ENTRY * thread_p, LOG_DATA_ADDR * p_addr, LOG_RCVINDEX rcvindex);
756 static int heap_rv_mvcc_redo_delete_internal (THREAD_ENTRY * thread_p, PAGE_PTR page, PGSLOTID slotid, MVCCID mvccid);
757 static void heap_mvcc_log_home_change_on_delete (THREAD_ENTRY * thread_p, RECDES * old_recdes, RECDES * new_recdes,
758  LOG_DATA_ADDR * p_addr);
759 static void heap_mvcc_log_home_no_change (THREAD_ENTRY * thread_p, LOG_DATA_ADDR * p_addr);
760 
761 static void heap_mvcc_log_redistribute (THREAD_ENTRY * thread_p, RECDES * p_recdes, LOG_DATA_ADDR * p_addr);
762 
763 #if defined(ENABLE_UNUSED_FUNCTION)
764 static INLINE int heap_try_fetch_header_page (THREAD_ENTRY * thread_p, PAGE_PTR * home_pgptr_p,
765  const VPID * home_vpid_p, const OID * oid_p, PAGE_PTR * hdr_pgptr_p,
766  const VPID * hdr_vpid_p, HEAP_SCANCACHE * scan_cache, int *again_count,
767  int again_max) __attribute__ ((ALWAYS_INLINE));
768 static INLINE int heap_try_fetch_forward_page (THREAD_ENTRY * thread_p, PAGE_PTR * home_pgptr_p,
769  const VPID * home_vpid_p, const OID * oid_p, PAGE_PTR * fwd_pgptr_p,
770  const VPID * fwd_vpid_p, const OID * fwd_oid_p,
771  HEAP_SCANCACHE * scan_cache, int *again_count, int again_max)
773 static INLINE int heap_try_fetch_header_with_forward_page (THREAD_ENTRY * thread_p, PAGE_PTR * home_pgptr_p,
774  const VPID * home_vpid_p, const OID * oid_p,
775  PAGE_PTR * hdr_pgptr_p, const VPID * hdr_vpid_p,
776  PAGE_PTR * fwd_pgptr_p, const VPID * fwd_vpid_p,
777  const OID * fwd_oid_p, HEAP_SCANCACHE * scan_cache,
778  int *again_count, int again_max)
780 #endif /* ENABLE_UNUSED_FUNCTION */
781 
782 /* common */
783 static void heap_link_watchers (HEAP_OPERATION_CONTEXT * child, HEAP_OPERATION_CONTEXT * parent);
784 static void heap_unfix_watchers (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context);
785 static void heap_clear_operation_context (HEAP_OPERATION_CONTEXT * context, HFID * hfid_p);
786 static int heap_mark_class_as_modified (THREAD_ENTRY * thread_p, OID * oid_p, int chn, bool decache);
787 static FILE_TYPE heap_get_file_type (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context);
788 static int heap_is_valid_oid (THREAD_ENTRY * thread_p, OID * oid);
789 static int heap_fix_header_page (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context);
790 static int heap_fix_forward_page (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, OID * forward_oid_hint);
791 static void heap_build_forwarding_recdes (RECDES * recdes_p, INT16 rec_type, OID * forward_oid);
792 
793 /* heap insert related functions */
794 static int heap_insert_adjust_recdes_header (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context,
795  bool is_mvcc_class);
796 static int heap_update_adjust_recdes_header (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * update_context,
797  bool is_mvcc_class);
798 static int heap_insert_handle_multipage_record (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context);
799 static int heap_get_insert_location_with_lock (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context,
800  PGBUF_WATCHER * home_hint_p);
801 static int heap_find_location_and_insert_rec_newhome (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context);
802 static int heap_insert_newhome (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * parent_context, RECDES * recdes_p,
803  OID * out_oid_p, PGBUF_WATCHER * newhome_pg_watcher);
804 static int heap_insert_physical (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context);
805 static void heap_log_insert_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, VFID * vfid_p, OID * oid_p,
806  RECDES * recdes_p, bool is_mvcc_op, bool is_redistribute_op);
807 
808 /* heap delete related functions */
809 static void heap_delete_adjust_header (MVCC_REC_HEADER * header_p, MVCCID mvcc_id, bool need_mvcc_header_max_size);
810 static int heap_get_record_location (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context);
811 static int heap_delete_bigone (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op);
812 static int heap_delete_relocation (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op);
813 static int heap_delete_home (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op);
814 static int heap_delete_physical (THREAD_ENTRY * thread_p, HFID * hfid_p, PAGE_PTR page_p, OID * oid_p);
815 static void heap_log_delete_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, VFID * vfid_p, OID * oid_p,
816  RECDES * recdes_p, bool mark_reusable, LOG_LSA * undo_lsa);
817 
818 /* heap update related functions */
819 static int heap_update_bigone (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op);
820 static int heap_update_relocation (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op);
821 static int heap_update_home (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op);
822 static int heap_update_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, short slot_id, RECDES * recdes_p);
823 static void heap_log_update_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, VFID * vfid_p, OID * oid_p,
824  RECDES * old_recdes_p, RECDES * new_recdes_p, LOG_RCVINDEX rcvindex);
825 
826 static void *heap_hfid_table_entry_alloc (void);
827 static int heap_hfid_table_entry_free (void *unique_stat);
828 static int heap_hfid_table_entry_init (void *unique_stat);
829 static int heap_hfid_table_entry_key_copy (void *src, void *dest);
830 static unsigned int heap_hfid_table_entry_key_hash (void *key, int hash_table_size);
831 static int heap_hfid_table_entry_key_compare (void *k1, void *k2);
832 static int heap_hfid_cache_get (THREAD_ENTRY * thread_p, const OID * class_oid, HFID * hfid, FILE_TYPE * ftype_out);
833 static int heap_get_hfid_from_class_record (THREAD_ENTRY * thread_p, const OID * class_oid, HFID * hfid);
834 
835 static void heap_page_update_chain_after_mvcc_op (THREAD_ENTRY * thread_p, PAGE_PTR heap_page, MVCCID mvccid);
836 static void heap_page_rv_chain_update (THREAD_ENTRY * thread_p, PAGE_PTR heap_page, MVCCID mvccid,
837  bool vacuum_status_change);
838 
839 static int heap_scancache_add_partition_node (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache,
840  OID * partition_oid);
841 static SCAN_CODE heap_get_visible_version_from_log (THREAD_ENTRY * thread_p, RECDES * recdes,
842  LOG_LSA * previous_version_lsa, HEAP_SCANCACHE * scan_cache,
843  int has_chn);
844 static int heap_update_set_prev_version (THREAD_ENTRY * thread_p, const OID * oid, PGBUF_WATCHER * home_pg_watcher,
845  PGBUF_WATCHER * fwd_pg_watcher, LOG_LSA * prev_version_lsa);
846 static int heap_scan_cache_allocate_recdes_data (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache_p,
847  RECDES * recdes_p, int size);
848 
849 static int heap_get_header_page (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * header_vpid);
850 
853 STATIC_INLINE int heap_copy_header_stats (THREAD_ENTRY * thread_p, PAGE_PTR page_header, HEAP_HDR_STATS * header_stats)
857 STATIC_INLINE int heap_copy_chain (THREAD_ENTRY * thread_p, PAGE_PTR page_heap, HEAP_CHAIN * chain)
859 STATIC_INLINE int heap_get_last_vpid (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * last_vpid)
861 
862 /*
863  * heap_hash_vpid () - Hash a page identifier
864  * return: hash value
865  * key_vpid(in): VPID to hash
866  * htsize(in): Size of hash table
867  */
868 static unsigned int
869 heap_hash_vpid (const void *key_vpid, unsigned int htsize)
870 {
871  const VPID *vpid = (VPID *) key_vpid;
872 
873  return ((vpid->pageid | ((unsigned int) vpid->volid) << 24) % htsize);
874 }
875 
876 /*
877  * heap_compare_vpid () - Compare two vpids keys for hashing
878  * return: int (key_vpid1 == key_vpid2 ?)
879  * key_vpid1(in): First key
880  * key_vpid2(in): Second key
881  */
882 static int
883 heap_compare_vpid (const void *key_vpid1, const void *key_vpid2)
884 {
885  const VPID *vpid1 = (VPID *) key_vpid1;
886  const VPID *vpid2 = (VPID *) key_vpid2;
887 
888  return VPID_EQ (vpid1, vpid2);
889 }
890 
891 /*
892  * heap_hash_hfid () - Hash a file identifier
893  * return: hash value
894  * key_hfid(in): HFID to hash
895  * htsize(in): Size of hash table
896  */
897 static unsigned int
898 heap_hash_hfid (const void *key_hfid, unsigned int htsize)
899 {
900  const HFID *hfid = (HFID *) key_hfid;
901 
902  return ((hfid->hpgid | ((unsigned int) hfid->vfid.volid) << 24) % htsize);
903 }
904 
905 /*
906  * heap_compare_hfid () - Compare two hfids keys for hashing
907  * return: int (key_hfid1 == key_hfid2 ?)
908  * key_hfid1(in): First key
909  * key_hfid2(in): Second key
910  */
911 static int
912 heap_compare_hfid (const void *key_hfid1, const void *key_hfid2)
913 {
914  const HFID *hfid1 = (HFID *) key_hfid1;
915  const HFID *hfid2 = (HFID *) key_hfid2;
916 
917  return HFID_EQ (hfid1, hfid2);
918 }
919 
920 /*
 921  * heap_stats_entry_free () - release the memory occupied by a best-space entry
922  * return: NO_ERROR
923  * data(in): a best space associated with the key
924  * args(in): NULL (not used here, but needed by mht_map)
925  */
926 static int
927 heap_stats_entry_free (THREAD_ENTRY * thread_p, void *data, void *args)
928 {
929  HEAP_STATS_ENTRY *ent;
930 
931  ent = (HEAP_STATS_ENTRY *) data;
932  assert_release (ent != NULL);
933 
934  if (ent)
935  {
936  if (heap_Bestspace->free_list_count < HEAP_STATS_ENTRY_FREELIST_SIZE)
937  {
938  ent->next = heap_Bestspace->free_list;
939  heap_Bestspace->free_list = ent;
940 
941  heap_Bestspace->free_list_count++;
942  }
943  else
944  {
945  free_and_init (ent);
946 
947  heap_Bestspace->num_free++;
948  }
949  }
950 
951  return NO_ERROR;
952 }
953 
954 /*
955  * heap_stats_add_bestspace () -
956  */
957 static HEAP_STATS_ENTRY *
958 heap_stats_add_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * vpid, int freespace)
959 {
960  HEAP_STATS_ENTRY *ent;
961  int rc;
962 
964 
965  rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex);
966 
967  ent = (HEAP_STATS_ENTRY *) mht_get (heap_Bestspace->vpid_ht, vpid);
968 
969  if (ent)
970  {
971  ent->best.freespace = freespace;
972  goto end;
973  }
974 
976  {
979 
981 
982 
983  ent = NULL;
984  goto end;
985  }
986 
987  if (heap_Bestspace->free_list_count > 0)
988  {
989  assert_release (heap_Bestspace->free_list != NULL);
990 
991  ent = heap_Bestspace->free_list;
992  if (ent == NULL)
993  {
994  goto end;
995  }
996  heap_Bestspace->free_list = ent->next;
997  ent->next = NULL;
998 
999  heap_Bestspace->free_list_count--;
1000  }
1001  else
1002  {
1003  ent = (HEAP_STATS_ENTRY *) malloc (sizeof (HEAP_STATS_ENTRY));
1004  if (ent == NULL)
1005  {
1007 
1008  goto end;
1009  }
1010 
1011  heap_Bestspace->num_alloc++;
1012  }
1013 
1014  HFID_COPY (&ent->hfid, hfid);
1015  ent->best.vpid = *vpid;
1016  ent->best.freespace = freespace;
1017  ent->next = NULL;
1018 
1019  if (mht_put (heap_Bestspace->vpid_ht, &ent->best.vpid, ent) == NULL)
1020  {
1021  assert_release (false);
1022  (void) heap_stats_entry_free (thread_p, ent, NULL);
1023  ent = NULL;
1024  goto end;
1025  }
1026 
1027  if (mht_put_new (heap_Bestspace->hfid_ht, &ent->hfid, ent) == NULL)
1028  {
1029  assert_release (false);
1030  (void) mht_rem (heap_Bestspace->vpid_ht, &ent->best.vpid, NULL, NULL);
1031  (void) heap_stats_entry_free (thread_p, ent, NULL);
1032  ent = NULL;
1033  goto end;
1034  }
1035 
1036  heap_Bestspace->num_stats_entries++;
1037 
1038 end:
1039 
1040  assert (mht_count (heap_Bestspace->vpid_ht) == mht_count (heap_Bestspace->hfid_ht));
1041 
1042  pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex);
1043 
1044  return ent;
1045 }
1046 
1047 /*
1048  * heap_stats_del_bestspace_by_hfid () -
1049  * return: deleted count
1050  *
1051  * hfid(in):
1052  */
1053 static int
1054 heap_stats_del_bestspace_by_hfid (THREAD_ENTRY * thread_p, const HFID * hfid)
1055 {
1056  HEAP_STATS_ENTRY *ent;
1057  int del_cnt = 0;
1058  int rc;
1059 
1060  rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex);
1061 
1062  while ((ent = (HEAP_STATS_ENTRY *) mht_get2 (heap_Bestspace->hfid_ht, hfid, NULL)) != NULL)
1063  {
1064  (void) mht_rem2 (heap_Bestspace->hfid_ht, &ent->hfid, ent, NULL, NULL);
1065  (void) mht_rem (heap_Bestspace->vpid_ht, &ent->best.vpid, NULL, NULL);
1066  (void) heap_stats_entry_free (thread_p, ent, NULL);
1067  ent = NULL;
1068 
1069  del_cnt++;
1070  }
1071 
1072  assert (del_cnt <= heap_Bestspace->num_stats_entries);
1073 
1074  heap_Bestspace->num_stats_entries -= del_cnt;
1075 
1076  assert (mht_count (heap_Bestspace->vpid_ht) == mht_count (heap_Bestspace->hfid_ht));
1077  pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex);
1078 
1079  return del_cnt;
1080 }
1081 
1082 /*
1083  * heap_stats_del_bestspace_by_vpid () -
1084  * return: NO_ERROR
1085  *
1086  * vpid(in):
1087  */
1088 static int
1089 heap_stats_del_bestspace_by_vpid (THREAD_ENTRY * thread_p, VPID * vpid)
1090 {
1091  HEAP_STATS_ENTRY *ent;
1092  int rc;
1093 
1094  rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex);
1095 
1096  ent = (HEAP_STATS_ENTRY *) mht_get (heap_Bestspace->vpid_ht, vpid);
1097  if (ent == NULL)
1098  {
1099  goto end;
1100  }
1101 
1102  (void) mht_rem2 (heap_Bestspace->hfid_ht, &ent->hfid, ent, NULL, NULL);
1103  (void) mht_rem (heap_Bestspace->vpid_ht, &ent->best.vpid, NULL, NULL);
1104  (void) heap_stats_entry_free (thread_p, ent, NULL);
1105  ent = NULL;
1106 
1107  heap_Bestspace->num_stats_entries -= 1;
1108 
1109 end:
1110  assert (mht_count (heap_Bestspace->vpid_ht) == mht_count (heap_Bestspace->hfid_ht));
1111 
1112  pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex);
1113 
1114  return NO_ERROR;
1115 }
1116 
1117 /*
1118  * heap_stats_get_bestspace_by_vpid () -
1119  * return: NO_ERROR
1120  *
1121  * vpid(in):
1122  */
1123 static HEAP_BESTSPACE
1124 heap_stats_get_bestspace_by_vpid (THREAD_ENTRY * thread_p, VPID * vpid)
1125 {
1126  HEAP_STATS_ENTRY *ent;
1127  HEAP_BESTSPACE best;
1128  int rc;
1129 
1130  best.freespace = -1;
1131  VPID_SET_NULL (&best.vpid);
1132 
1133  rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex);
1134 
1135  ent = (HEAP_STATS_ENTRY *) mht_get (heap_Bestspace->vpid_ht, vpid);
1136  if (ent == NULL)
1137  {
1138  goto end;
1139  }
1140 
1141  best = ent->best;
1142 
1143 end:
1144  assert (mht_count (heap_Bestspace->vpid_ht) == mht_count (heap_Bestspace->hfid_ht));
1145 
1146  pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex);
1147 
1148  return best;
1149 }
1150 
1151 /*
1152  * Scan page buffer and latch page manipulation
1153  */
1154 
1155 /*
1156  * heap_scan_pb_lock_and_fetch () -
1157  * return:
1158  * vpid_ptr(in):
1159  * fetch_mode(in):
1160  * lock(in):
1161  * scan_cache(in):
1162  *
1163  * NOTE: Because this function is called in too many places and because it
1164  * is useful where a page was fixed for debug purpose, we pass the
1165  * caller file/line arguments to pgbuf_fix.
1166  */
#if defined (NDEBUG)
static PAGE_PTR
heap_scan_pb_lock_and_fetch (THREAD_ENTRY * thread_p, const VPID * vpid_ptr, PAGE_FETCH_MODE fetch_mode, LOCK lock,
			     HEAP_SCANCACHE * scan_cache, PGBUF_WATCHER * pg_watcher)
#else /* !NDEBUG */
static PAGE_PTR
heap_scan_pb_lock_and_fetch_debug (THREAD_ENTRY * thread_p, const VPID * vpid_ptr, PAGE_FETCH_MODE fetch_mode,
				   LOCK lock, HEAP_SCANCACHE * scan_cache, PGBUF_WATCHER * pg_watcher,
				   const char *caller_file, const int caller_line)
#endif /* !NDEBUG */
{
  /* Fix (latch) a heap page, choosing the latch mode from the requested
   * object lock (optionally converted through the scan cache's page latch),
   * and verify the fixed page really is a heap page.  In debug builds the
   * caller's file/line are forwarded to the page buffer for diagnostics. */
  PAGE_PTR pgptr = NULL;
  LOCK page_lock;
  PGBUF_LATCH_MODE page_latch_mode;

  /* Derive the effective page lock: either taken verbatim from the caller,
   * or converted via lock_Conv[][] against the scan cache's page latch. */
  if (scan_cache != NULL)
    {
      if (scan_cache->page_latch == NULL_LOCK)
	{
	  page_lock = NULL_LOCK;
	}
      else
	{
	  assert (scan_cache->page_latch >= NULL_LOCK);
	  assert (lock >= NULL_LOCK);
	  page_lock = lock_Conv[scan_cache->page_latch][lock];
	  assert (page_lock != NA_LOCK);
	}
    }
  else
    {
      page_lock = lock;
    }

  /* S_LOCK maps to a shared (read) latch; every other lock, including
   * NULL_LOCK, gets an exclusive (write) latch. */
  if (page_lock == S_LOCK)
    {
      page_latch_mode = PGBUF_LATCH_READ;
    }
  else
    {
      page_latch_mode = PGBUF_LATCH_WRITE;
    }

  if (pg_watcher != NULL)
    {
      /* Ordered fix through the watcher (deadlock-safe page ordering). */
#if defined (NDEBUG)
      if (pgbuf_ordered_fix_release (thread_p, vpid_ptr, fetch_mode, page_latch_mode, pg_watcher) != NO_ERROR)
#else /* !NDEBUG */
      if (pgbuf_ordered_fix_debug (thread_p, vpid_ptr, fetch_mode, page_latch_mode, pg_watcher,
				   caller_file, caller_line) != NO_ERROR)
#endif /* !NDEBUG */
	{
	  return NULL;
	}
      pgptr = pg_watcher->pgptr;
    }
  else
    {
      /* Plain unconditional fix when no watcher is supplied. */
#if defined (NDEBUG)
      pgptr = pgbuf_fix_release (thread_p, vpid_ptr, fetch_mode, page_latch_mode, PGBUF_UNCONDITIONAL_LATCH);
#else /* !NDEBUG */
      pgptr =
	pgbuf_fix_debug (thread_p, vpid_ptr, fetch_mode, page_latch_mode, PGBUF_UNCONDITIONAL_LATCH, caller_file,
			 caller_line);
#endif /* !NDEBUG */
    }

  if (pgptr != NULL)
    {
      /* Sanity check: the fixed page must be typed as a heap page. */
      (void) pgbuf_check_page_ptype (thread_p, pgptr, PAGE_HEAP);
    }

  return pgptr;
}
1241 
1242 /*
1243  * heap_is_big_length () -
1244  * return: true/false
1245  * length(in):
1246  */
1247 bool
1249 {
1250  return (length > heap_Maxslotted_reclength) ? true : false;
1251 }
1252 
1253 /*
1254  * heap_get_spage_type () -
1255  * return: the type of the slotted page of the heap file.
1256  */
1257 static int
1258 heap_get_spage_type (void)
1259 {
1261 }
1262 
1263 /*
1264  * heap_is_reusable_oid () -
1265  * return: true if the heap file is reuse_oid table
1266  * file_type(in): the file type of the heap file
1267  */
1268 static bool
1269 heap_is_reusable_oid (const FILE_TYPE file_type)
1270 {
1271  if (file_type == FILE_HEAP)
1272  {
1273  return false;
1274  }
1275  else if (file_type == FILE_HEAP_REUSE_SLOTS)
1276  {
1277  return true;
1278  }
1279  else
1280  {
1281  assert (false);
1283  }
1284  return false;
1285 }
1286 
1287 /* TODO: STL::list for _cache.area */
1288 /*
1289  * heap_classrepr_initialize_cache () - Initialize the class representation cache
1290  * return: NO_ERROR
1291  */
1292 static int
1293 heap_classrepr_initialize_cache (void)
1294 {
1295  HEAP_CLASSREPR_ENTRY *cache_entry;
1296  HEAP_CLASSREPR_LOCK *lock_entry;
1297  HEAP_CLASSREPR_HASH *hash_entry;
1298  int i, ret = NO_ERROR;
1299  size_t size;
1300 
1301  if (heap_Classrepr != NULL)
1302  {
1303  ret = heap_classrepr_finalize_cache ();
1304  if (ret != NO_ERROR)
1305  {
1306  goto exit_on_error;
1307  }
1308  }
1309 
1310  /* initialize hash entries table */
1311  heap_Classrepr_cache.num_entries = HEAP_CLASSREPR_MAXCACHE;
1312 
1313  heap_Classrepr_cache.area =
1314  (HEAP_CLASSREPR_ENTRY *) malloc (sizeof (HEAP_CLASSREPR_ENTRY) * heap_Classrepr_cache.num_entries);
1315  if (heap_Classrepr_cache.area == NULL)
1316  {
1319  sizeof (HEAP_CLASSREPR_ENTRY) * heap_Classrepr_cache.num_entries);
1320  goto exit_on_error;
1321  }
1322 
1323  cache_entry = heap_Classrepr_cache.area;
1324  for (i = 0; i < heap_Classrepr_cache.num_entries; i++)
1325  {
1326  pthread_mutex_init (&cache_entry[i].mutex, NULL);
1327 
1328  cache_entry[i].idx = i;
1329  cache_entry[i].fcnt = 0;
1330  cache_entry[i].zone = ZONE_FREE;
1331  cache_entry[i].next_wait_thrd = NULL;
1332  cache_entry[i].hash_next = NULL;
1333  cache_entry[i].prev = NULL;
1334  cache_entry[i].next = (i < heap_Classrepr_cache.num_entries - 1) ? &cache_entry[i + 1] : NULL;
1335 
1336  cache_entry[i].force_decache = false;
1337 
1338  OID_SET_NULL (&cache_entry[i].class_oid);
1339  cache_entry[i].max_reprid = DEFAULT_REPR_INCREMENT;
1340  cache_entry[i].repr = (OR_CLASSREP **) malloc (cache_entry[i].max_reprid * sizeof (OR_CLASSREP *));
1341  if (cache_entry[i].repr == NULL)
1342  {
1344  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ret, 1, cache_entry[i].max_reprid * sizeof (OR_CLASSREP *));
1345  goto exit_on_error;
1346  }
1347  memset (cache_entry[i].repr, 0, cache_entry[i].max_reprid * sizeof (OR_CLASSREP *));
1348 
1349  cache_entry[i].last_reprid = NULL_REPRID;
1350  }
1351 
1352  /* initialize hash bucket table */
1353  heap_Classrepr_cache.num_hash = CLASSREPR_HASH_SIZE;
1354  heap_Classrepr_cache.hash_table =
1355  (HEAP_CLASSREPR_HASH *) malloc (heap_Classrepr_cache.num_hash * sizeof (HEAP_CLASSREPR_HASH));
1356  if (heap_Classrepr_cache.hash_table == NULL)
1357  {
1359  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ret, 1, heap_Classrepr_cache.num_hash * sizeof (HEAP_CLASSREPR_HASH));
1360  goto exit_on_error;
1361  }
1362 
1363  hash_entry = heap_Classrepr_cache.hash_table;
1364  for (i = 0; i < heap_Classrepr_cache.num_hash; i++)
1365  {
1366  pthread_mutex_init (&hash_entry[i].hash_mutex, NULL);
1367  hash_entry[i].idx = i;
1368  hash_entry[i].hash_next = NULL;
1369  hash_entry[i].lock_next = NULL;
1370  }
1371 
1372  /* initialize hash lock table */
1374  heap_Classrepr_cache.lock_table = (HEAP_CLASSREPR_LOCK *) malloc (size);
1375  if (heap_Classrepr_cache.lock_table == NULL)
1376  {
1379  goto exit_on_error;
1380  }
1381  lock_entry = heap_Classrepr_cache.lock_table;
1382  for (i = 0; i < (int) thread_num_total_threads (); i++)
1383  {
1384  OID_SET_NULL (&lock_entry[i].class_oid);
1385  lock_entry[i].lock_next = NULL;
1386  lock_entry[i].next_wait_thrd = NULL;
1387  }
1388 
1389  /* initialize LRU list */
1390 
1391  pthread_mutex_init (&heap_Classrepr_cache.LRU_list.LRU_mutex, NULL);
1392  heap_Classrepr_cache.LRU_list.LRU_top = NULL;
1393  heap_Classrepr_cache.LRU_list.LRU_bottom = NULL;
1394 
1395  /* initialize free list */
1396  pthread_mutex_init (&heap_Classrepr_cache.free_list.free_mutex, NULL);
1397  heap_Classrepr_cache.free_list.free_top = &heap_Classrepr_cache.area[0];
1398  heap_Classrepr_cache.free_list.free_cnt = heap_Classrepr_cache.num_entries;
1399 
1400  heap_Classrepr = &heap_Classrepr_cache;
1401 
1402  return ret;
1403 
1404 exit_on_error:
1405 
1406  heap_Classrepr_cache.num_entries = 0;
1407 
1408  return (ret == NO_ERROR) ? ER_FAILED : ret;
1409 }
1410 
1411 /* TODO: STL::list for _cache.area */
1412 /*
1413  * heap_classrepr_finalize_cache () - Destroy any cached structures
1414  * return: NO_ERROR
1415  *
1416  * Note: Any cached representations are deallocated at this moment and
1417  * the hash table is also removed.
1418  */
static int
heap_classrepr_finalize_cache (void)
{
  /* Tear down the class-representation cache: free every cached
   * representation, the entry/hash/lock tables, and destroy all mutexes.
   * No-op when the cache was never initialized. */
  HEAP_CLASSREPR_ENTRY *cache_entry;
  HEAP_CLASSREPR_HASH *hash_entry;
  int i, j;
  int ret = NO_ERROR;

  if (heap_Classrepr == NULL)
    {
      return NO_ERROR;		/* nop */
    }

#ifdef DEBUG_CLASSREPR_CACHE
  /* In debug builds, refuse to finalize while entries are still fixed. */
  ret = heap_classrepr_dump_anyfixed ();
  if (ret != NO_ERROR)
    {
      return ret;
    }
#endif /* DEBUG_CLASSREPR_CACHE */

  /* finalize hash entries table */
  cache_entry = heap_Classrepr_cache.area;
  for (i = 0; cache_entry != NULL && i < heap_Classrepr_cache.num_entries; i++)
    {
      pthread_mutex_destroy (&cache_entry[i].mutex);

      if (cache_entry[i].repr == NULL)
	{
	  /* Should never happen: every entry gets a repr array at init. */
	  assert (cache_entry[i].repr != NULL);
	  continue;
	}

      /* Representations are cached at indexes 0..last_reprid (inclusive). */
      for (j = 0; j <= cache_entry[i].last_reprid; j++)
	{
	  if (cache_entry[i].repr[j] != NULL)
	    {
	      or_free_classrep (cache_entry[i].repr[j]);
	      cache_entry[i].repr[j] = NULL;
	    }
	}
      free_and_init (cache_entry[i].repr);
    }
  if (heap_Classrepr_cache.area != NULL)
    {
      free_and_init (heap_Classrepr_cache.area);
    }
  heap_Classrepr_cache.num_entries = -1;

  /* finalize hash bucket table */
  hash_entry = heap_Classrepr_cache.hash_table;
  for (i = 0; hash_entry != NULL && i < heap_Classrepr_cache.num_hash; i++)
    {
      pthread_mutex_destroy (&hash_entry[i].hash_mutex);
    }
  heap_Classrepr_cache.num_hash = -1;
  if (heap_Classrepr_cache.hash_table != NULL)
    {
      free_and_init (heap_Classrepr_cache.hash_table);
    }

  /* finalize hash lock table */
  if (heap_Classrepr_cache.lock_table != NULL)
    {
      free_and_init (heap_Classrepr_cache.lock_table);
    }

  /* finalize LRU list */

  pthread_mutex_destroy (&heap_Classrepr_cache.LRU_list.LRU_mutex);

  /* finalize free list */
  pthread_mutex_destroy (&heap_Classrepr_cache.free_list.free_mutex);

  /* Mark the cache as uninitialized. */
  heap_Classrepr = NULL;

  return ret;
}
1497 
1498 /*
1499  * heap_classrepr_entry_reset () -
1500  * return: NO_ERROR
1501  * cache_entry(in):
1502  *
1503  * Note: Reset the given class representation entry.
1504  */
1505 static int
1506 heap_classrepr_entry_reset (HEAP_CLASSREPR_ENTRY * cache_entry)
1507 {
1508  int i;
1509  int ret = NO_ERROR;
1510 
1511  if (cache_entry == NULL)
1512  {
1513  return NO_ERROR; /* nop */
1514  }
1515 
1516  /* free all classrepr */
1517  for (i = 0; i <= cache_entry->last_reprid; i++)
1518  {
1519  if (cache_entry->repr[i] != NULL)
1520  {
1521  or_free_classrep (cache_entry->repr[i]);
1522  cache_entry->repr[i] = NULL;
1523  }
1524  }
1525 
1526  cache_entry->force_decache = false;
1527  OID_SET_NULL (&cache_entry->class_oid);
1528  if (cache_entry->max_reprid > DEFAULT_REPR_INCREMENT)
1529  {
1530  OR_CLASSREP **t;
1531 
1532  t = cache_entry->repr;
1533  cache_entry->repr = (OR_CLASSREP **) malloc (DEFAULT_REPR_INCREMENT * sizeof (OR_CLASSREP *));
1534  if (cache_entry->repr == NULL)
1535  {
1538  cache_entry->repr = t;
1539  }
1540  else
1541  {
1542  free_and_init (t);
1543  cache_entry->max_reprid = DEFAULT_REPR_INCREMENT;
1544  memset (cache_entry->repr, 0, DEFAULT_REPR_INCREMENT * sizeof (OR_CLASSREP *));
1545  }
1546 
1547  }
1548  cache_entry->last_reprid = NULL_REPRID;
1549 
1550  return ret;
1551 }
1552 
1553 /*
1554  * heap_classrepr_entry_remove_from_LRU () -
1555  * return: NO_ERROR
1556  * cache_entry(in):
1557  */
1558 static int
1559 heap_classrepr_entry_remove_from_LRU (HEAP_CLASSREPR_ENTRY * cache_entry)
1560 {
1561  if (cache_entry)
1562  {
1563  if (cache_entry == heap_Classrepr_cache.LRU_list.LRU_top)
1564  {
1565  heap_Classrepr_cache.LRU_list.LRU_top = cache_entry->next;
1566  }
1567  else
1568  {
1569  cache_entry->prev->next = cache_entry->next;
1570  }
1571 
1572  if (cache_entry == heap_Classrepr_cache.LRU_list.LRU_bottom)
1573  {
1574  heap_Classrepr_cache.LRU_list.LRU_bottom = cache_entry->prev;
1575  }
1576  else
1577  {
1578  cache_entry->next->prev = cache_entry->prev;
1579  }
1580  }
1581 
1582  return NO_ERROR;
1583 }
1584 
1585 /* TODO: STL::list for ->prev */
1586 /*
1587  * heap_classrepr_decache_guessed_last () -
1588  * return: NO_ERROR
1589  * class_oid(in):
1590  *
1591  * Note: Decache the guessed last representations (i.e., that with -1)
1592  * from the given class.
1593  *
1594  * Note: This function should be called when a class is updated.
1595  * 1: During normal update
1596  */
static int
heap_classrepr_decache_guessed_last (const OID * class_oid)
{
  /* Find the cache entry for class_oid in its hash bucket, lock it, unlink
   * it from the hash chain and the LRU list, and - if nobody has it fixed -
   * reset it and return it to the free list.  Entries still fixed elsewhere
   * are only marked force_decache; the last unfixer frees them
   * (see heap_classrepr_free).  Lock order is hash_mutex -> entry mutex;
   * when trylock on the entry fails the hash mutex is dropped first and the
   * search restarts, to avoid deadlock. */
  HEAP_CLASSREPR_ENTRY *cache_entry, *prev_entry, *cur_entry;
  HEAP_CLASSREPR_HASH *hash_anchor;
  int rv;
  int ret = NO_ERROR;

  if (class_oid != NULL)
    {
      hash_anchor = &heap_Classrepr->hash_table[REPR_HASH (class_oid)];

    search_begin:
      rv = pthread_mutex_lock (&hash_anchor->hash_mutex);

      for (cache_entry = hash_anchor->hash_next; cache_entry != NULL; cache_entry = cache_entry->hash_next)
	{
	  if (OID_EQ (class_oid, &cache_entry->class_oid))
	    {
	      /* Try to take the entry mutex without blocking while we still
	       * hold the hash mutex. */
	      rv = pthread_mutex_trylock (&cache_entry->mutex);
	      if (rv == 0)
		{
		  /* Got both locks: proceed straight to the unlink. */
		  goto delete_begin;
		}

	      if (rv != EBUSY)
		{
		  /* Unexpected trylock failure.
		   * NOTE(review): an er_set call reporting this error was
		   * lost in the garbled listing (original lines 1624-1625). */
		  pthread_mutex_unlock (&hash_anchor->hash_mutex);
		  goto exit_on_error;
		}

	      /* Entry is busy: release the hash mutex and block on the entry
	       * mutex to respect the lock order. */
	      pthread_mutex_unlock (&hash_anchor->hash_mutex);
	      rv = pthread_mutex_lock (&cache_entry->mutex);

	      /* cache_entry can be used by others. check again */
	      if (!OID_EQ (class_oid, &cache_entry->class_oid))
		{
		  pthread_mutex_unlock (&cache_entry->mutex);
		  goto search_begin;
		}
	      break;
	    }
	}

      /* class_oid cache_entry is not found */
      if (cache_entry == NULL)
	{
	  pthread_mutex_unlock (&hash_anchor->hash_mutex);
	  goto exit_on_error;
	}

      /* hash anchor lock has been released */
      rv = pthread_mutex_lock (&hash_anchor->hash_mutex);

    delete_begin:

      /* delete classrepr from hash chain */
      prev_entry = NULL;
      cur_entry = hash_anchor->hash_next;
      while (cur_entry != NULL)
	{
	  if (cur_entry == cache_entry)
	    {
	      break;
	    }
	  prev_entry = cur_entry;
	  cur_entry = cur_entry->hash_next;
	}

      /* class_oid cache_entry is not found */
      if (cur_entry == NULL)
	{
	  /* This cannot happen */
	  pthread_mutex_unlock (&hash_anchor->hash_mutex);
	  pthread_mutex_unlock (&cache_entry->mutex);

	  goto exit_on_error;
	}

      if (prev_entry == NULL)
	{
	  hash_anchor->hash_next = cur_entry->hash_next;
	}
      else
	{
	  prev_entry->hash_next = cur_entry->hash_next;
	}
      cur_entry->hash_next = NULL;

      pthread_mutex_unlock (&hash_anchor->hash_mutex);

      /* From here on the entry is unreachable via the hash table; mark it so
       * the last unfixer knows it must not go back to the LRU list. */
      cache_entry->force_decache = true;

      /* Remove from LRU list */
      if (cache_entry->zone == ZONE_LRU)
	{
	  rv = pthread_mutex_lock (&heap_Classrepr_cache.LRU_list.LRU_mutex);
	  (void) heap_classrepr_entry_remove_from_LRU (cache_entry);
	  pthread_mutex_unlock (&heap_Classrepr_cache.LRU_list.LRU_mutex);
	  cache_entry->zone = ZONE_VOID;
	}
      cache_entry->prev = NULL;
      cache_entry->next = NULL;

      if (cache_entry->fcnt == 0)
	{
	  /* move cache_entry to free_list */
	  ret = heap_classrepr_entry_reset (cache_entry);
	  if (ret == NO_ERROR)
	    {
	      ret = heap_classrepr_entry_free (cache_entry);
	    }
	}

      pthread_mutex_unlock (&cache_entry->mutex);
    }

  return ret;

exit_on_error:

  return (ret == NO_ERROR) ? ER_FAILED : ret;
}
1722 
1723 /*
 1724  * heap_classrepr_decache () - Decache any unfixed class representations of
1725  * given class
1726  * return: NO_ERROR
1727  * class_oid(in):
1728  *
1729  * Note: Decache all class representations of given class. If a class
1730  * is not given all class representations are decached.
1731  *
1732  * Note: This function should be called when a class is updated.
1733  * 1: At the end/beginning of rollback since we do not have any
1734  * idea of a heap identifier of rolled back objects and we
1735  * expend too much time, searching for the OID, every time we
1736  * rolled back an updated object.
1737  */
1738 int
1739 heap_classrepr_decache (THREAD_ENTRY * thread_p, const OID * class_oid)
1740 {
1741  int ret;
1742 
1743  ret = heap_classrepr_decache_guessed_last (class_oid);
1744  if (ret != NO_ERROR)
1745  {
1746  return ret;
1747  }
1748 
1749  if (csect_enter (thread_p, CSECT_HEAP_CHNGUESS, INF_WAIT) != NO_ERROR)
1750  {
1751  return ER_FAILED;
1752  }
1753  if (heap_Guesschn != NULL && heap_Guesschn->schema_change == false)
1754  {
1755  ret = heap_chnguess_decache (class_oid);
1756  }
1757  csect_exit (thread_p, CSECT_HEAP_CHNGUESS);
1758 
1759  return ret;
1760 }
1761 
1762 /* TODO: STL::list for _cache.area */
1763 /*
1764  * heap_classrepr_free () - Free a class representation
1765  * return: NO_ERROR
1766  * classrep(in): The class representation structure
1767  * idx_incache(in): An index if the desired class representation is part of
1768  * the cache, otherwise -1 (no part of cache)
1769  *
1770  * Note: Free a class representation. If the class representation was
1771  * part of the class representation cache, the fix count is
1772  * decremented and the class representation will continue be
1773  * cached. The representation entry will be subject for
1774  * replacement when the fix count is zero (no one is using it).
 1775  * If the class representation was not part of the cache, it is
1776  * freed.
1777  *
1778  * NOTE: consider to use heap_classrepr_free_and_init.
1779  */
int
heap_classrepr_free (OR_CLASSREP * classrep, int *idx_incache)
{
  /* Release one fix on a class representation.  Representations outside the
   * cache (*idx_incache < 0) are freed outright.  Cached ones have their fix
   * count decremented; when it reaches zero the entry is either returned to
   * the free list (if it was force-decached) or promoted to the LRU top.
   * *idx_incache is always reset to -1 on return. */
  HEAP_CLASSREPR_ENTRY *cache_entry;
  int rv;
  int ret = NO_ERROR;

  if (*idx_incache < 0)
    {
      /* Not cached: the caller owns the representation; free it here. */
      or_free_classrep (classrep);
      return NO_ERROR;
    }

  cache_entry = &heap_Classrepr_cache.area[*idx_incache];

  rv = pthread_mutex_lock (&cache_entry->mutex);
  cache_entry->fcnt--;
  if (cache_entry->fcnt == 0)
    {
      /*
       * Is this entry declared to be decached
       */
#ifdef DEBUG_CLASSREPR_CACHE
      rv = pthread_mutex_lock (&heap_Classrepr_cache.num_fix_entries_mutex);
      heap_Classrepr_cache.num_fix_entries--;
      pthread_mutex_unlock (&heap_Classrepr_cache.num_fix_entries_mutex);
#endif /* DEBUG_CLASSREPR_CACHE */
      if (cache_entry->force_decache != 0)
	{
	  /* cache_entry is already removed from LRU list. */

	  /* move cache_entry to free_list */
	  /* NOTE(review): here entry_free runs before entry_reset, while
	   * heap_classrepr_decache_guessed_last does reset-then-free --
	   * confirm whether the ordering difference is intentional. */
	  ret = heap_classrepr_entry_free (cache_entry);
	  if (ret == NO_ERROR)
	    {
	      ret = heap_classrepr_entry_reset (cache_entry);
	    }
	}
      else
	{
	  /* relocate entry to the top of LRU list */
	  if (cache_entry != heap_Classrepr_cache.LRU_list.LRU_top)
	    {
	      rv = pthread_mutex_lock (&heap_Classrepr_cache.LRU_list.LRU_mutex);
	      if (cache_entry->zone == ZONE_LRU)
		{
		  /* remove from LRU list */
		  (void) heap_classrepr_entry_remove_from_LRU (cache_entry);
		}

	      /* insert into LRU top */
	      cache_entry->prev = NULL;
	      cache_entry->next = heap_Classrepr_cache.LRU_list.LRU_top;
	      if (heap_Classrepr_cache.LRU_list.LRU_top == NULL)
		{
		  /* list was empty: entry is also the new bottom */
		  heap_Classrepr_cache.LRU_list.LRU_bottom = cache_entry;
		}
	      else
		{
		  heap_Classrepr_cache.LRU_list.LRU_top->prev = cache_entry;
		}
	      heap_Classrepr_cache.LRU_list.LRU_top = cache_entry;
	      cache_entry->zone = ZONE_LRU;

	      pthread_mutex_unlock (&heap_Classrepr_cache.LRU_list.LRU_mutex);
	    }
	}
    }
  pthread_mutex_unlock (&cache_entry->mutex);
  *idx_incache = -1;

  return ret;
}
1853 
1854 #ifdef SERVER_MODE
1855 
/* Return values of heap_classrepr_lock_class () (besides ER_FAILED). */
enum
{ NEED_TO_RETRY = 0, LOCK_ACQUIRED };
1858 
1859 /*
1860  * heap_classrepr_lock_class () - Prevent other threads accessing class_oid
1861  * class representation.
1862  * return: ER_FAILED, NEED_TO_RETRY or LOCK_ACQUIRED
1863  * hash_anchor(in):
1864  * class_oid(in):
1865  */
static int
heap_classrepr_lock_class (THREAD_ENTRY * thread_p, HEAP_CLASSREPR_HASH * hash_anchor, const OID * class_oid)
{
  HEAP_CLASSREPR_LOCK *cur_lock_entry;
  THREAD_ENTRY *cur_thrd_entry;

  /* Resolve the current thread entry if the caller did not supply one. */
  if (thread_p == NULL)
    {
      thread_p = thread_get_thread_entry_info ();
      if (thread_p == NULL)
	{
	  return ER_FAILED;
	}
    }
  cur_thrd_entry = thread_p;

  /* If another thread already locked this class, enqueue ourselves on that
   * lock's waiter list and wait to be woken. */
  for (cur_lock_entry = hash_anchor->lock_next; cur_lock_entry != NULL; cur_lock_entry = cur_lock_entry->lock_next)
    {
      if (OID_EQ (&cur_lock_entry->class_oid, class_oid))
	{
	  cur_thrd_entry->next_wait_thrd = cur_lock_entry->next_wait_thrd;
	  cur_lock_entry->next_wait_thrd = cur_thrd_entry;

	  thread_lock_entry (cur_thrd_entry);
	  pthread_mutex_unlock (&hash_anchor->hash_mutex);
	  /* NOTE(review): a call that suspends this thread (releasing its
	   * entry lock) appears to be missing from this copy between the
	   * unlock above and the resume_status check below -- confirm
	   * against the upstream source. */

	  if (cur_thrd_entry->resume_status == THREAD_HEAP_CLSREPR_RESUMED)
	    {
	      return NEED_TO_RETRY;	/* traverse hash chain again */
	    }
	  else
	    {
	      /* probably due to an interrupt */
	      assert ((cur_thrd_entry->resume_status == THREAD_RESUME_DUE_TO_INTERRUPT));
	      return ER_FAILED;
	    }
	}
    }

  /* No one holds the class lock: claim this thread's pre-allocated lock
   * entry and splice it into the anchor's lock list. */
  cur_lock_entry = &heap_Classrepr_cache.lock_table[cur_thrd_entry->index];
  cur_lock_entry->class_oid = *class_oid;
  cur_lock_entry->next_wait_thrd = NULL;
  cur_lock_entry->lock_next = hash_anchor->lock_next;
  hash_anchor->lock_next = cur_lock_entry;

  pthread_mutex_unlock (&hash_anchor->hash_mutex);

  return LOCK_ACQUIRED;		/* lock acquired. */
}
1916 
1917 /*
1918  * heap_classrepr_unlock_class () -
1919  * return: NO_ERROR
1920  * hash_anchor(in):
1921  * class_oid(in):
1922  * need_hash_mutex(in):
1923  */
static int
heap_classrepr_unlock_class (HEAP_CLASSREPR_HASH * hash_anchor, const OID * class_oid, int need_hash_mutex)
{
  HEAP_CLASSREPR_LOCK *prev_lock_entry, *cur_lock_entry;
  THREAD_ENTRY *cur_thrd_entry;
  int rv;

  /* if hash mutex lock is not acquired */
  if (need_hash_mutex)
    {
      rv = pthread_mutex_lock (&hash_anchor->hash_mutex);
    }

  /* Find this class's lock entry in the anchor's singly-linked lock list. */
  prev_lock_entry = NULL;
  cur_lock_entry = hash_anchor->lock_next;
  while (cur_lock_entry != NULL)
    {
      if (OID_EQ (&cur_lock_entry->class_oid, class_oid))
	{
	  break;
	}
      prev_lock_entry = cur_lock_entry;
      cur_lock_entry = cur_lock_entry->lock_next;
    }

  /* if lock entry is found, remove it from lock list */
  if (cur_lock_entry == NULL)
    {				/* this cannot happen */
      pthread_mutex_unlock (&hash_anchor->hash_mutex);
      return ER_FAILED;
    }

  if (prev_lock_entry == NULL)
    {
      hash_anchor->lock_next = cur_lock_entry->lock_next;
    }
  else
    {
      prev_lock_entry->lock_next = cur_lock_entry->lock_next;
    }
  cur_lock_entry->lock_next = NULL;
  pthread_mutex_unlock (&hash_anchor->hash_mutex);

  /* Wake every thread that queued on this lock; each loop iteration pops
   * the head of the waiter list before waking it. */
  for (cur_thrd_entry = cur_lock_entry->next_wait_thrd; cur_thrd_entry != NULL;
       cur_thrd_entry = cur_lock_entry->next_wait_thrd)
    {
      cur_lock_entry->next_wait_thrd = cur_thrd_entry->next_wait_thrd;
      cur_thrd_entry->next_wait_thrd = NULL;

      thread_wakeup (cur_thrd_entry, THREAD_HEAP_CLSREPR_RESUMED);
    }

  return NO_ERROR;
}
1977 #endif /* SERVER_MODE */
1978 
1979 /* TODO: STL::list for ->prev */
1980 /*
1981  * heap_classrepr_entry_alloc () -
1982  * return:
1983  */
static HEAP_CLASSREPR_ENTRY *
heap_classrepr_entry_alloc (void)
{
  HEAP_CLASSREPR_HASH *hash_anchor;
  HEAP_CLASSREPR_ENTRY *cache_entry, *prev_entry, *cur_entry;
  int rv;

  cache_entry = NULL;

/* check_free_list: */

  /* 1. Get entry from free list */
  /* Unlocked peek first; re-checked below under free_mutex. */
  if (heap_Classrepr_cache.free_list.free_top == NULL)
    {
      goto check_LRU_list;
    }

  rv = pthread_mutex_lock (&heap_Classrepr_cache.free_list.free_mutex);
  if (heap_Classrepr_cache.free_list.free_top == NULL)
    {
      /* Lost the race: the free list was drained before we got the mutex. */
      pthread_mutex_unlock (&heap_Classrepr_cache.free_list.free_mutex);
      cache_entry = NULL;
    }
  else
    {
      /* Pop the head of the free list and return it locked. */
      cache_entry = heap_Classrepr_cache.free_list.free_top;
      heap_Classrepr_cache.free_list.free_top = cache_entry->next;
      heap_Classrepr_cache.free_list.free_cnt--;
      pthread_mutex_unlock (&heap_Classrepr_cache.free_list.free_mutex);

      rv = pthread_mutex_lock (&cache_entry->mutex);
      cache_entry->next = NULL;
      cache_entry->zone = ZONE_VOID;

      return cache_entry;
    }

check_LRU_list:
  /* 2. Get entry from LRU list: victimize the least-recently-used entry
   * with no fixers, starting from the bottom. */
  if (heap_Classrepr_cache.LRU_list.LRU_bottom == NULL)
    {
      goto expand_list;
    }

  rv = pthread_mutex_lock (&heap_Classrepr_cache.LRU_list.LRU_mutex);
  for (cache_entry = heap_Classrepr_cache.LRU_list.LRU_bottom; cache_entry != NULL; cache_entry = cache_entry->prev)
    {
      if (cache_entry->fcnt == 0)
	{
	  /* remove from LRU list */
	  (void) heap_classrepr_entry_remove_from_LRU (cache_entry);
	  cache_entry->zone = ZONE_VOID;
	  cache_entry->next = cache_entry->prev = NULL;
	  break;
	}
    }
  pthread_mutex_unlock (&heap_Classrepr_cache.LRU_list.LRU_mutex);

  if (cache_entry == NULL)
    {
      goto expand_list;
    }

  rv = pthread_mutex_lock (&cache_entry->mutex);
  /* if some has referenced, retry */
  if (cache_entry->fcnt != 0)
    {
      pthread_mutex_unlock (&cache_entry->mutex);
      goto check_LRU_list;
    }

  /* delete classrepr from hash chain */
  hash_anchor = &heap_Classrepr->hash_table[REPR_HASH (&cache_entry->class_oid)];
  rv = pthread_mutex_lock (&hash_anchor->hash_mutex);
  prev_entry = NULL;
  cur_entry = hash_anchor->hash_next;
  while (cur_entry != NULL)
    {
      if (cur_entry == cache_entry)
	{
	  break;
	}
      prev_entry = cur_entry;
      cur_entry = cur_entry->hash_next;
    }

  if (cur_entry == NULL)
    {
      /* This cannot happen */
      pthread_mutex_unlock (&hash_anchor->hash_mutex);
      pthread_mutex_unlock (&cache_entry->mutex);

      return NULL;
    }
  if (prev_entry == NULL)
    {
      hash_anchor->hash_next = cur_entry->hash_next;
    }
  else
    {
      prev_entry->hash_next = cur_entry->hash_next;
    }
  cur_entry->hash_next = NULL;
  pthread_mutex_unlock (&hash_anchor->hash_mutex);

  /* Wipe the victim's cached representations; entry is returned locked. */
  (void) heap_classrepr_entry_reset (cache_entry);

end:

  return cache_entry;

expand_list:

  /* not supported */
  cache_entry = NULL;
  goto end;
}
2101 
2102 /* TODO: STL::list for ->next */
2103 /*
2104  * heap_classrepr_entry_free () -
2105  * return: NO_ERROR
2106  * cache_entry(in):
2107  */
2108 static int
2109 heap_classrepr_entry_free (HEAP_CLASSREPR_ENTRY * cache_entry)
2110 {
2111  int rv;
2112  rv = pthread_mutex_lock (&heap_Classrepr_cache.free_list.free_mutex);
2113 
2114  cache_entry->next = heap_Classrepr_cache.free_list.free_top;
2115  heap_Classrepr_cache.free_list.free_top = cache_entry;
2116  cache_entry->zone = ZONE_FREE;
2117  heap_Classrepr_cache.free_list.free_cnt++;
2118 
2119  pthread_mutex_unlock (&heap_Classrepr_cache.free_list.free_mutex);
2120 
2121  return NO_ERROR;
2122 }
2123 
2124 /*
2125  * heap_classrepr_get_from_record ()
2126  * return: classrepr
2127  *
2128  * last_reprid(out):
2129  * class_oid(in): The class identifier
2130  * class_recdes(in): The class recdes (when known) or NULL
2131  * reprid(in): Representation of the class or NULL_REPRID for last one
2132  */
2133 static OR_CLASSREP *
2134 heap_classrepr_get_from_record (THREAD_ENTRY * thread_p, REPR_ID * last_reprid, const OID * class_oid,
2135  RECDES * class_recdes, REPR_ID reprid)
2136 {
2137  RECDES peek_recdes;
2138  RECDES *recdes = NULL;
2139  HEAP_SCANCACHE scan_cache;
2140  OR_CLASSREP *repr = NULL;
2141 
2142  if (last_reprid != NULL)
2143  {
2144  *last_reprid = NULL_REPRID;
2145  }
2146 
2147  if (class_recdes != NULL)
2148  {
2149  recdes = class_recdes;
2150  }
2151  else
2152  {
2153  heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);
2154  if (heap_get_class_record (thread_p, class_oid, &peek_recdes, &scan_cache, PEEK) != S_SUCCESS)
2155  {
2156  goto end;
2157  }
2158  recdes = &peek_recdes;
2159  }
2160 
2161  repr = or_get_classrep (recdes, reprid);
2162  if (last_reprid != NULL)
2163  {
2164  *last_reprid = or_rep_id (recdes);
2165  }
2166 
2167 end:
2168  if (class_recdes == NULL)
2169  {
2170  heap_scancache_end (thread_p, &scan_cache);
2171  }
2172  return repr;
2173 }
2174 
2175 /*
2176  * heap_classrepr_get () - Obtain the desired class representation
2177  * return: classrepr
2178  * class_oid(in): The class identifier
2179  * class_recdes(in): The class recdes (when known) or NULL
2180  * reprid(in): Representation of the class or NULL_REPRID for last one
2181  * idx_incache(in): An index if the desired class representation is part
2182  * of the cache
2183  *
2184  * Note: Obtain the desired class representation for the given class.
2185  */
OR_CLASSREP *
heap_classrepr_get (THREAD_ENTRY * thread_p, const OID * class_oid, RECDES * class_recdes, REPR_ID reprid,
		    int *idx_incache)
{
  HEAP_CLASSREPR_ENTRY *cache_entry;
  HEAP_CLASSREPR_HASH *hash_anchor;
  OR_CLASSREP *repr = NULL;
  OR_CLASSREP *repr_from_record = NULL;	/* repr read from heap, not yet owned by cache */
  OR_CLASSREP *repr_last = NULL;	/* latest repr, read when reprid is an older one */
  REPR_ID last_reprid;
  int r;

  *idx_incache = -1;

  hash_anchor = &heap_Classrepr->hash_table[REPR_HASH (class_oid)];

  /* search entry with class_oid from hash chain */
search_begin:
  r = pthread_mutex_lock (&hash_anchor->hash_mutex);

  for (cache_entry = hash_anchor->hash_next; cache_entry != NULL; cache_entry = cache_entry->hash_next)
    {
      if (OID_EQ (class_oid, &cache_entry->class_oid))
	{
	  /* Try-lock first so we never hold the hash mutex while blocking on
	   * an entry mutex (avoids lock-order deadlock). */
	  r = pthread_mutex_trylock (&cache_entry->mutex);
	  if (r == 0)
	    {
	      pthread_mutex_unlock (&hash_anchor->hash_mutex);
	    }
	  else
	    {
	      if (r != EBUSY)
		{
		  /* some error code */
		  /* NOTE(review): an er_set call appears to be missing here
		   * in this copy -- confirm against the upstream source. */
		  pthread_mutex_unlock (&hash_anchor->hash_mutex);
		  goto exit;
		}
	      /* if cache_entry lock is busy. release hash mutex lock and lock cache_entry lock unconditionally */
	      pthread_mutex_unlock (&hash_anchor->hash_mutex);
	      r = pthread_mutex_lock (&cache_entry->mutex);
	    }
	  /* check if cache_entry is used by others */
	  /* Entry may have been victimized and reused for a different class
	   * while we waited for its mutex. */
	  if (!OID_EQ (class_oid, &cache_entry->class_oid))
	    {
	      pthread_mutex_unlock (&cache_entry->mutex);
	      goto search_begin;
	    }

	  break;
	}
    }

  if (cache_entry == NULL)
    {
      /* Cache miss: read the representation from the class record and try to
       * install it in the cache. */
      if (repr_from_record == NULL)
	{
	  /* note: we need to read class record from heap page. however, latching a page and holding mutex is never a
	   * good idea, and it can generate ugly deadlocks. but in most cases, we won't have concurrency here,
	   * so let's try a conditional latch on page of class. if that doesn't work, release the hash mutex,
	   * read representation from heap and restart the process to ensure consistency. */
	  VPID vpid_of_class;
	  PAGE_PTR page_of_class = NULL;
	  VPID_GET_FROM_OID (&vpid_of_class, class_oid);
	  page_of_class = pgbuf_fix (thread_p, &vpid_of_class, OLD_PAGE, PGBUF_LATCH_READ, PGBUF_CONDITIONAL_LATCH);
	  if (page_of_class == NULL)
	    {
	      /* we cannot hold mutex */
	      pthread_mutex_unlock (&hash_anchor->hash_mutex);
	    }
	  else if (spage_get_record_type (page_of_class, class_oid->slotid) != REC_HOME)
	    {
	      /* things get too complicated when we need to do ordered fix. */
	      pgbuf_unfix_and_init (thread_p, page_of_class);
	      pthread_mutex_unlock (&hash_anchor->hash_mutex);
	    }
	  repr_from_record = heap_classrepr_get_from_record (thread_p, &last_reprid, class_oid, class_recdes, reprid);
	  if (repr_from_record == NULL)
	    {
	      ASSERT_ERROR ();

	      if (page_of_class != NULL)
		{
		  /* hash mutex is still held in this case; release both */
		  pthread_mutex_unlock (&hash_anchor->hash_mutex);
		  pgbuf_unfix_and_init (thread_p, page_of_class);
		}
	      goto exit;
	    }
	  if (reprid == NULL_REPRID)
	    {
	      reprid = last_reprid;
	    }
	  if (reprid != last_reprid && repr_last == NULL)
	    {
	      /* an old representation was requested; also read the latest one
	       * so the new cache entry can store it */
	      repr_last = heap_classrepr_get_from_record (thread_p, &last_reprid, class_oid, class_recdes, last_reprid);
	      if (repr_last == NULL)
		{
		  /* can we accept this case? */
		}
	    }
	  if (page_of_class == NULL)
	    {
	      /* hash mutex was released, we need to restart search. */
	      goto search_begin;
	    }
	  else
	    {
	      pgbuf_unfix_and_init (thread_p, page_of_class);
	      /* hash mutex was kept */
	      /* fall through */
	    }
	}
      assert (repr_from_record != NULL);
      assert (last_reprid != NULL_REPRID);

#ifdef SERVER_MODE
      /* class_oid was not found. Lock class_oid. heap_classrepr_lock_class () release hash_anchor->hash_lock */
      r = heap_classrepr_lock_class (thread_p, hash_anchor, class_oid);
      if (r != LOCK_ACQUIRED)
	{
	  if (r == NEED_TO_RETRY)
	    {
	      goto search_begin;
	    }
	  else
	    {
	      assert (r == ER_FAILED);
	      goto exit;
	    }
	}
#endif

      /* Get free entry */
      cache_entry = heap_classrepr_entry_alloc ();
      if (cache_entry == NULL)
	{
	  /* if all cache entry is busy, return disk repr. */

#ifdef SERVER_MODE
	  /* free lock for class_oid */
	  (void) heap_classrepr_unlock_class (hash_anchor, class_oid, true);
#endif

	  if (repr_last != NULL)
	    {
	      or_free_classrep (repr_last);
	    }

	  /* return disk repr when repr cache is full */
	  return repr_from_record;
	}

      /* check if cache_entry->repr[last_reprid] is valid. */
      if (last_reprid >= cache_entry->max_reprid)
	{
	  /* grow the per-entry repr array so it can index last_reprid */
	  free_and_init (cache_entry->repr);

	  cache_entry->repr = (OR_CLASSREP **) malloc ((last_reprid + 1) * sizeof (OR_CLASSREP *));
	  if (cache_entry->repr == NULL)
	    {
	      /* NOTE(review): the er_set call reporting out-of-memory was
	       * truncated in this copy; the dangling expression below is its
	       * remaining argument line -- confirm against upstream. */
	      (last_reprid + 1) * sizeof (OR_CLASSREP *));

	      pthread_mutex_unlock (&cache_entry->mutex);
	      (void) heap_classrepr_entry_free (cache_entry);
#ifdef SERVER_MODE
	      (void) heap_classrepr_unlock_class (hash_anchor, class_oid, true);
#endif
	      if (repr != NULL)
		{
		  or_free_classrep (repr);
		  repr = NULL;
		}
	      goto exit;
	    }
	  cache_entry->max_reprid = last_reprid + 1;

	  memset (cache_entry->repr, 0, cache_entry->max_reprid * sizeof (OR_CLASSREP *));
	}

      if (reprid <= NULL_REPRID || reprid > last_reprid || reprid > cache_entry->max_reprid)
	{
	  /* invalid representation id; abandon the new entry */
	  assert (false);

	  pthread_mutex_unlock (&cache_entry->mutex);
	  (void) heap_classrepr_entry_free (cache_entry);
#ifdef SERVER_MODE
	  (void) heap_classrepr_unlock_class (hash_anchor, class_oid, true);
#endif

	  if (repr != NULL)
	    {
	      or_free_classrep (repr);
	      repr = NULL;
	    }

	  goto exit;
	}

      /* Install the representation(s); ownership moves to the cache entry. */
      cache_entry->repr[reprid] = repr_from_record;
      repr = cache_entry->repr[reprid];
      repr_from_record = NULL;
      cache_entry->last_reprid = last_reprid;
      if (reprid != last_reprid)
	{			/* if last repr is not cached */
	  /* normally, we should not access heap record while keeping mutex in cache entry. however, this entry was not
	   * yet attached to cache, so no one will get its mutex yet */
	  cache_entry->repr[last_reprid] = repr_last;
	  repr_last = NULL;
	}

      cache_entry->fcnt = 1;
      cache_entry->class_oid = *class_oid;
#ifdef DEBUG_CLASSREPR_CACHE
      r = pthread_mutex_lock (&heap_Classrepr_cache.num_fix_entries_mutex);
      heap_Classrepr_cache.num_fix_entries++;
      pthread_mutex_unlock (&heap_Classrepr_cache.num_fix_entries_mutex);

#endif /* DEBUG_CLASSREPR_CACHE */
      *idx_incache = cache_entry->idx;

      /* Add to hash chain, and remove lock for class_oid */
      r = pthread_mutex_lock (&hash_anchor->hash_mutex);
      cache_entry->hash_next = hash_anchor->hash_next;
      hash_anchor->hash_next = cache_entry;

#ifdef SERVER_MODE
      (void) heap_classrepr_unlock_class (hash_anchor, class_oid, false);
#endif
    }
  else
    {
      /* now, we have already cache_entry for class_oid. if it contains repr info for reprid, return it. else load
       * classrepr info for it */

      if (reprid == NULL_REPRID)
	{
	  reprid = cache_entry->last_reprid;
	}

      if (reprid <= NULL_REPRID || reprid > cache_entry->last_reprid || reprid > cache_entry->max_reprid)
	{
	  assert (false);

	  pthread_mutex_unlock (&cache_entry->mutex);

	  goto exit;
	}

      /* reprid cannot be greater than cache_entry->last_reprid. */
      repr = cache_entry->repr[reprid];
      if (repr == NULL)
	{
	  /* load repr. info. for reprid of class_oid */
	  if (repr_from_record == NULL)
	    {
	      /* we need to read record from its page. we cannot hold cache mutex and latch a page. */
	      pthread_mutex_unlock (&cache_entry->mutex);
	      repr_from_record =
		heap_classrepr_get_from_record (thread_p, &last_reprid, class_oid, class_recdes, reprid);
	      if (repr_from_record == NULL)
		{
		  goto exit;
		}
	      /* we need to start over */
	      goto search_begin;
	    }
	  else
	    {
	      /* use load representation from record */
	      cache_entry->repr[reprid] = repr_from_record;
	      repr = repr_from_record;
	      repr_from_record = NULL;

	      /* fall through */
	    }
	}

      cache_entry->fcnt++;
      *idx_incache = cache_entry->idx;
    }
  pthread_mutex_unlock (&cache_entry->mutex);

exit:
  /* Free any representation read from disk that was not adopted by cache. */
  if (repr_from_record != NULL)
    {
      or_free_classrep (repr_from_record);
    }
  if (repr_last != NULL)
    {
      or_free_classrep (repr_last);
    }
  return repr;
}
2482 
2483 #ifdef DEBUG_CLASSREPR_CACHE
2484 /*
2485  * heap_classrepr_dump_cache () - Dump the class representation cache
2486  * return: NO_ERROR
2487  * simple_dump(in):
2488  *
2489  * Note: Dump the class representation cache.
2490  */
2491 static int
2492 heap_classrepr_dump_cache (bool simple_dump)
2493 {
2494  OR_CLASSREP *classrepr;
2495  HEAP_CLASSREPR_ENTRY *cache_entry;
2496  int i, j;
2497  int rv;
2498  int ret = NO_ERROR;
2499 
2500  if (heap_Classrepr == NULL)
2501  {
2502  return NO_ERROR; /* nop */
2503  }
2504 
2505  (void) fflush (stderr);
2506  (void) fflush (stdout);
2507 
2508  fprintf (stdout, "*** Class Representation cache dump *** \n");
2509  fprintf (stdout, " Number of entries = %d, Number of used entries = %d\n", heap_Classrepr->num_entries,
2510  heap_Classrepr->num_entries - heap_Classrepr->free_list.free_cnt);
2511 
2512  for (cache_entry = heap_Classrepr->area, i = 0; i < heap_Classrepr->num_entries; cache_entry++, i++)
2513  {
2514  fprintf (stdout, " \nEntry_id %d\n", cache_entry->idx);
2515 
2516  rv = pthread_mutex_lock (&cache_entry->mutex);
2517  for (j = 0; j <= cache_entry->last_reprid; j++)
2518  {
2519  classrepr = cache_entry->repr[j];
2520  if (classrepr == NULL)
2521  {
2522  fprintf (stdout, ".....\n");
2523  continue;
2524  }
2525  fprintf (stdout, " Fix count = %d, force_decache = %d\n", cache_entry->fcnt, cache_entry->force_decache);
2526 
2527  if (simple_dump == true)
2528  {
2529  fprintf (stdout, " Class_oid = %d|%d|%d, Reprid = %d\n", (int) cache_entry->class_oid.volid,
2530  cache_entry->class_oid.pageid, (int) cache_entry->class_oid.slotid, cache_entry->repr[j]->id);
2531  fprintf (stdout, " Representation address = %p\n", classrepr);
2532 
2533  }
2534  else
2535  {
2536  ret = heap_classrepr_dump (&cache_entry->class_oid, classrepr);
2537  }
2538  }
2539 
2540  pthread_mutex_unlock (&cache_entry->mutex);
2541  }
2542 
2543  return ret;
2544 }
2545 #endif /* DEBUG_CLASSREPR_CACHE */
2546 
2547 /*
2548  * heap_classrepr_dump () - Dump schema of a given class representation
2549  * return: NO_ERROR
2550  * class_oid(in):
2551  * repr(in): The class representation
2552  *
2553  * Note: Dump the class representation cache.
2554  */
2555 static int
2556 heap_classrepr_dump (THREAD_ENTRY * thread_p, FILE * fp, const OID * class_oid, const OR_CLASSREP * repr)
2557 {
2558  OR_ATTRIBUTE *volatile attrepr;
2559  volatile int i;
2560  int k, j;
2561  char *classname;
2562  const char *attr_name;
2563  DB_VALUE def_dbvalue;
2564  PR_TYPE *pr_type;
2565  int disk_length;
2566  OR_BUF buf;
2567  bool copy;
2568  RECDES recdes = RECDES_INITIALIZER; /* Used to obtain attrnames */
2569  int ret = NO_ERROR;
2570  char *index_name = NULL;
2571  char *string = NULL;
2572  int alloced_string = 0;
2573  HEAP_SCANCACHE scan_cache;
2574 
2575  /*
2576  * The class is fetched to print the attribute names.
2577  *
2578  * This is needed since the name of the attributes is not contained
2579  * in the class representation structure.
2580  */
2581  (void) heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);
2582 
2583  if (repr == NULL)
2584  {
2585  goto exit_on_error;
2586  }
2587 
2588  if (heap_get_class_record (thread_p, class_oid, &recdes, &scan_cache, COPY) != S_SUCCESS)
2589  {
2590  goto exit_on_error;
2591  }
2592 
2593  classname = or_class_name (&recdes);
2594  assert (classname != NULL);
2595 
2596  fprintf (fp, "\n");
2597  fprintf (fp,
2598  " Class-OID = %d|%d|%d, Classname = %s, reprid = %d,\n"
2599  " Attrs: Tot = %d, Nfix = %d, Nvar = %d, Nshare = %d, Nclass = %d,\n Total_length_of_fixattrs = %d\n",
2600  (int) class_oid->volid, class_oid->pageid, (int) class_oid->slotid, classname, repr->id, repr->n_attributes,
2601  (repr->n_attributes - repr->n_variable - repr->n_shared_attrs - repr->n_class_attrs), repr->n_variable,
2602  repr->n_shared_attrs, repr->n_class_attrs, repr->fixed_length);
2603 
2604  if (repr->n_attributes > 0)
2605  {
2606  fprintf (fp, "\n");
2607  fprintf (fp, " Attribute Specifications:\n");
2608  }
2609 
2610  for (i = 0, attrepr = repr->attributes; i < repr->n_attributes; i++, attrepr++)
2611  {
2612  string = NULL;
2613  alloced_string = 0;
2614  ret = or_get_attrname (&recdes, attrepr->id, &string, &alloced_string);
2615  if (ret != NO_ERROR)
2616  {
2617  ASSERT_ERROR ();
2618  goto exit_on_error;
2619  }
2620 
2621  attr_name = string;
2622  if (attr_name == NULL)
2623  {
2624  attr_name = "?????";
2625  }
2626 
2627  fprintf (fp, "\n Attrid = %d, Attrname = %s, type = %s,\n location = %d, position = %d,\n", attrepr->id,
2628  attr_name, pr_type_name (attrepr->type), attrepr->location, attrepr->position);
2629 
2630  if (string != NULL && alloced_string == 1)
2631  {
2632  db_private_free_and_init (thread_p, string);
2633  }
2634 
2635  if (!OID_ISNULL (&attrepr->classoid) && !OID_EQ (&attrepr->classoid, class_oid))
2636  {
2637  if (heap_get_class_name (thread_p, &attrepr->classoid, &classname) != NO_ERROR || classname == NULL)
2638  {
2639  ASSERT_ERROR_AND_SET (ret);
2640  goto exit_on_error;
2641  }
2642  fprintf (fp, " Inherited from Class: oid = %d|%d|%d, Name = %s\n", (int) attrepr->classoid.volid,
2643  attrepr->classoid.pageid, (int) attrepr->classoid.slotid, classname);
2644  free_and_init (classname);
2645  }
2646 
2647  if (attrepr->n_btids > 0)
2648  {
2649  fprintf (fp, " Number of Btids = %d,\n", attrepr->n_btids);
2650  for (k = 0; k < attrepr->n_btids; k++)
2651  {
2652  index_name = NULL;
2653  /* find index_name */
2654  for (j = 0; j < repr->n_indexes; ++j)
2655  {
2656  if (BTID_IS_EQUAL (&(repr->indexes[j].btid), &(attrepr->btids[k])))
2657  {
2658  index_name = repr->indexes[j].btname;
2659  break;
2660  }
2661  }
2662 
2663  fprintf (fp, " BTID: VFID %d|%d, Root_PGID %d, %s\n", (int) attrepr->btids[k].vfid.volid,
2664  attrepr->btids[k].vfid.fileid, attrepr->btids[k].root_pageid,
2665  (index_name == NULL) ? "unknown" : index_name);
2666  }
2667  }
2668 
2669  /*
2670  * Dump the default value if any.
2671  */
2672  fprintf (fp, " Default disk value format:\n");
2673  fprintf (fp, " length = %d, value = ", attrepr->default_value.val_length);
2674 
2675  if (attrepr->default_value.val_length <= 0)
2676  {
2677  fprintf (fp, "NULL");
2678  }
2679  else
2680  {
2681  or_init (&buf, (char *) attrepr->default_value.value, attrepr->default_value.val_length);
2682  buf.error_abort = 1;
2683 
2684  switch (_setjmp (buf.env))
2685  {
2686  case 0:
2687  /* Do not copy the string--just use the pointer. The pr_ routines for strings and sets have different
2688  * semantics for length. A negative length value for strings means "don't copy the string, just use the
2689  * pointer". */
2690 
2691  disk_length = attrepr->default_value.val_length;
2692  copy = (pr_is_set_type (attrepr->type)) ? true : false;
2693  pr_type = PR_TYPE_FROM_ID (attrepr->type);
2694  if (pr_type)
2695  {
2696  (*(pr_type->data_readval)) (&buf, &def_dbvalue, attrepr->domain, disk_length, copy, NULL, 0);
2697 
2698  db_value_fprint (stdout, &def_dbvalue);
2699  (void) pr_clear_value (&def_dbvalue);
2700  }
2701  else
2702  {
2703  fprintf (fp, "PR_TYPE is NULL");
2704  }
2705  break;
2706  default:
2707  /*
2708  * An error was found during the reading of the attribute value
2709  */
2710  fprintf (fp, "Error transforming the default value\n");
2711  break;
2712  }
2713  }
2714  fprintf (fp, "\n");
2715  }
2716 
2717  (void) heap_scancache_end (thread_p, &scan_cache);
2718 
2719  return ret;
2720 
2721 exit_on_error:
2722 
2723  (void) heap_scancache_end (thread_p, &scan_cache);
2724 
2725  fprintf (fp, "Dump has been aborted...");
2726 
2727  return (ret == NO_ERROR) ? ER_FAILED : ret;
2728 }
2729 
2730 #ifdef DEBUG_CLASSREPR_CACHE
2731 /*
2732  * heap_classrepr_dump_anyfixed() - Dump class representation cache if
2733  * any entry is fixed
2734  * return: NO_ERROR
2735  *
2736  * Note: The class representation cache is dumped if any cache entry is fixed
2737  *
2738  * This is a debugging function that can be used to verify if
2739  * entries were freed after a set of operations (e.g., a
2740  * transaction or a API function).
2741  *
2742  * Note:
2743  * This function will not give you good results when there are
2744  * multiple users in the system (multiprocessing). However, it
2745  * can be used during shutdown.
2746  */
2747 int
2748 heap_classrepr_dump_anyfixed (void)
2749 {
2750  int ret = NO_ERROR;
2751 
2752  if (heap_Classrepr->num_fix_entries > 0)
2753  {
2754  er_log_debug (ARG_FILE_LINE, "heap_classrepr_dump_anyfixed: Some entries are fixed\n");
2755  ret = heap_classrepr_dump_cache (true);
2756  }
2757 
2758  return ret;
2759 }
2760 #endif /* DEBUG_CLASSREPR_CACHE */
2761 
2762 /*
2763  * heap_stats_get_min_freespace () - Minimal space to consider a page for statistics
2764  * return: int minspace
2765  * heap_hdr(in): Current header of heap
2766  *
2767  * Note: Find the minimal space to consider to continue caching a page
2768  * for statistics.
2769  */
2770 static int
2771 heap_stats_get_min_freespace (HEAP_HDR_STATS * heap_hdr)
2772 {
2773  int min_freespace;
2774  int header_size;
2775 
2776  header_size = OR_MVCC_MAX_HEADER_SIZE;
2777 
2778  /*
2779  * Don't cache as a good space page if page does not have at least
2780  * unfill_space + one record
2781  */
2782 
2783  if (heap_hdr->estimates.num_recs > 0)
2784  {
2785  min_freespace = (int) (heap_hdr->estimates.recs_sumlen / heap_hdr->estimates.num_recs);
2786 
2787  if (min_freespace < (header_size + 20))
2788  {
2789  min_freespace = header_size + 20; /* Assume very small records */
2790  }
2791  }
2792  else
2793  {
2794  min_freespace = header_size + 20; /* Assume very small records */
2795  }
2796 
2797  min_freespace += heap_hdr->unfill_space;
2798 
2799  min_freespace = MIN (min_freespace, HEAP_DROP_FREE_SPACE);
2800 
2801  return min_freespace;
2802 }
2803 
2804 /*
2805  * heap_stats_update () - Update one header hinted page space statistics
2806  * return: NO_ERROR
2807  * pgptr(in): Page pointer
2808  * hfid(in): Object heap file identifier
2809  * prev_freespace(in):
2810  *
2811  * NOTE: There should be at least HEAP_DROP_FREE_SPACE in order to
2812  * insert this page to best hint array.
2813  * If we cannot fix a heap header page due to holding it by
2814  * others, we will postpone this updating until next deletion.
2815  * In this case, unfortunately, if some record is not deleted
2816  * from this page in the future, we may not use this page until
2817  * heap_stats_sync_bestspace function searches all pages.
2818  */
void
heap_stats_update (THREAD_ENTRY * thread_p, PAGE_PTR pgptr, const HFID * hfid, int prev_freespace)
{
  VPID *vpid;
  int freespace, error;
  bool need_update;

  freespace = spage_get_free_space_without_saving (thread_p, pgptr, &need_update);
  /* NOTE(review): the condition line that governed this block was lost in
   * this copy (the scrape skips a source line here) -- confirm against the
   * upstream source before editing. */
  {
    if (prev_freespace < freespace)
      {
	vpid = pgbuf_get_vpid_ptr (pgptr);
	assert_release (vpid != NULL);

	/* record the page in the in-memory best-space cache */
	(void) heap_stats_add_bestspace (thread_p, hfid, vpid, freespace);
      }
  }

  if (need_update || prev_freespace <= HEAP_DROP_FREE_SPACE)
    {
      if (freespace > HEAP_DROP_FREE_SPACE)
	{
	  vpid = pgbuf_get_vpid_ptr (pgptr);
	  assert_release (vpid != NULL);

	  error = heap_stats_update_internal (thread_p, hfid, vpid, freespace);
	  if (error != NO_ERROR)
	    {
	      /* header page was busy; retry the hint update on a later delete */
	      spage_set_need_update_best_hint (thread_p, pgptr, true);
	    }
	  else if (need_update == true)
	    {
	      spage_set_need_update_best_hint (thread_p, pgptr, false);
	    }
	}
      else if (need_update == true)
	{
	  /* page no longer qualifies as a best page; clear the pending flag */
	  spage_set_need_update_best_hint (thread_p, pgptr, false);
	}
    }
}
2861 
2862 /*
2863  * heap_stats_update_internal () - Update one header hinted page space statistics
2864  * return: NO_ERROR
2865  * hfid(in): Object heap file identifier
2866  * lotspace_vpid(in): Page which has a lot of free space
2867  * free_space(in): The free space on the page
2868  *
2869  * Note: Update header hinted best space page information. This
2870  * function is used during deletions and updates when the free
2871  * space on the page is greater than HEAP_DROP_FREE_SPACE.
2872  */
static int
heap_stats_update_internal (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * lotspace_vpid, int free_space)
{
  HEAP_HDR_STATS *heap_hdr;	/* Header of heap structure */
  PAGE_PTR hdr_pgptr = NULL;	/* Page pointer to header page */
  VPID vpid;			/* Page-volume identifier */
  RECDES recdes;		/* Header record descriptor */
  LOG_DATA_ADDR addr;		/* Address of logging data */
  int i, best;
  int ret = NO_ERROR;

  /* Retrieve the header of heap */
  vpid.volid = hfid->vfid.volid;
  vpid.pageid = hfid->hpgid;

  /*
   * We do not want to wait for the following operation.
   * So, if we cannot lock the page return.
   */
  hdr_pgptr = pgbuf_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
  if (hdr_pgptr == NULL)
    {
      /* Page is busy or other type of error */
      goto exit_on_error;
    }

  (void) pgbuf_check_page_ptype (thread_p, hdr_pgptr, PAGE_HEAP);

  /*
   * Peek the header record to find statistics for insertion.
   * Update the statistics directly.
   */
  if (spage_get_record (thread_p, hdr_pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
    {
      goto exit_on_error;
    }

  heap_hdr = (HEAP_HDR_STATS *) recdes.data;
  best = heap_hdr->estimates.head;	/* start scanning the ring at head */

  if (free_space >= heap_stats_get_min_freespace (heap_hdr))
    {
      /*
       * We do not compare with the current stored values since these values
       * may not be accurate at all. When the given one is supposed to be
       * accurate.
       */

      /*
       * Find a good place to insert this page
       */
      for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
	{
	  /* A null or nearly-full slot is a good candidate to overwrite. */
	  if (VPID_ISNULL (&heap_hdr->estimates.best[best].vpid)
	      || heap_hdr->estimates.best[best].freespace <= HEAP_DROP_FREE_SPACE)
	    {
	      break;
	    }

	  best = HEAP_STATS_NEXT_BEST_INDEX (best);
	}

      if (VPID_ISNULL (&heap_hdr->estimates.best[best].vpid))
	{
	  heap_hdr->estimates.num_high_best++;
	  /* NOTE(review): a line appears to have been lost here during
	   * extraction (numbering jumps 2937->2939); upstream it is likely
	   * an assert bounding num_high_best -- confirm against source. */
	}
      else if (heap_hdr->estimates.best[best].freespace > HEAP_DROP_FREE_SPACE)
	{
	  /* The evicted slot still points at a usable page; keep track of it
	   * as an "other high best" and queue it in the second-best ring. */
	  heap_hdr->estimates.num_other_high_best++;

	  heap_stats_put_second_best (heap_hdr, &heap_hdr->estimates.best[best].vpid);
	}
      /*
       * Now substitute the entry with the new information
       */

      heap_hdr->estimates.best[best].freespace = free_space;
      heap_hdr->estimates.best[best].vpid = *lotspace_vpid;

      heap_hdr->estimates.head = HEAP_STATS_NEXT_BEST_INDEX (best);

      /*
       * The changes to the statistics are not logged. They are fixed
       * automatically sooner or later
       */

      addr.vfid = &hfid->vfid;
      addr.pgptr = hdr_pgptr;
      /* NOTE(review): a line appears to have been lost here during
       * extraction (numbering jumps 2961->2963); upstream it is likely the
       * addr.offset assignment -- confirm against source. */
      log_skip_logging (thread_p, &addr);
      pgbuf_set_dirty (thread_p, hdr_pgptr, FREE);
      hdr_pgptr = NULL;		/* ownership passed to pgbuf_set_dirty (FREE) */
    }
  else
    {
      pgbuf_unfix_and_init (thread_p, hdr_pgptr);
    }

  return ret;

exit_on_error:
  if (hdr_pgptr)
    {
      pgbuf_unfix_and_init (thread_p, hdr_pgptr);
    }

  return (ret == NO_ERROR) ? ER_FAILED : ret;
}
2982 
2983 /*
2984  * heap_stats_put_second_best () - Put a free page into second best hint array
2985  * return: void
2986  * heap_hdr(in): Statistics of heap file
2987  * vpid(in): VPID to be added
2988  *
2989  * NOTE: A free page is not always inserted to the second best hint array.
2990  * Second best hints will be collected for every 1000 pages in order
2991  * to increase randomness for "emptying contiguous pages" scenario.
2992  */
static void
heap_stats_put_second_best (HEAP_HDR_STATS * heap_hdr, VPID * vpid)
{
  int tail;

  /* Only every 1000th substitution is actually recorded, to keep the hints
   * spread out (see the header comment about randomness). */
  if (heap_hdr->estimates.num_substitutions++ % 1000 == 0)
    {
      tail = heap_hdr->estimates.tail_second_best;

      heap_hdr->estimates.second_best[tail] = *vpid;
      /* NOTE(review): a line appears to have been lost here during
       * extraction (numbering jumps 3002->3004); upstream it advances
       * tail_second_best to the next ring index -- confirm against source. */

      /* NOTE(review): the 'if' guard of the following block appears to have
       * been lost (numbering jumps 3004->3006); an 'else' follows below, so
       * the code as visible is syntactically incomplete. Upstream this is
       * the ring-full case, where head is pushed forward. */
	{
	  assert (heap_hdr->estimates.head_second_best == tail);
	  heap_hdr->estimates.head_second_best = heap_hdr->estimates.tail_second_best;
	}
      else
	{
	  /* NOTE(review): the opening brace of this branch appears to have
	   * been lost (numbering jumps 3011->3013). */
	  heap_hdr->estimates.num_second_best++;
	}

      /* If both head and tail refer to the same index, the number of second best hints is
       * HEAP_NUM_BEST_SPACESTATS(10). */
      assert (heap_hdr->estimates.num_second_best != 0);
      /* NOTE(review): the opening line of this ring-consistency assert
       * appears to have been lost (numbering jumps 3018->3020); the
       * conditional-expression lines below are kept verbatim. */
	      ? ((heap_hdr->estimates.tail_second_best - heap_hdr->estimates.head_second_best)
		 == heap_hdr->estimates.num_second_best)
	      : ((10 + heap_hdr->estimates.tail_second_best - heap_hdr->estimates.head_second_best)
		 == heap_hdr->estimates.num_second_best));

      /* Restart the substitution window. */
      heap_hdr->estimates.num_substitutions = 1;
    }
}
3028 
3029 /*
 * heap_stats_get_second_best () - Get a free page from second best hint array
3031  * return: NO_ERROR or ER_FAILED
3032  * heap_hdr(in): Statistics of heap file
3033  * vpid(out): VPID to get
3034  */
static int
heap_stats_get_second_best (HEAP_HDR_STATS * heap_hdr, VPID * vpid)
{
  int head;

  assert (vpid != NULL);

  /* Empty ring: nothing to hand out. */
  if (heap_hdr->estimates.num_second_best == 0)
    {
      assert (heap_hdr->estimates.tail_second_best == heap_hdr->estimates.head_second_best);
      VPID_SET_NULL (vpid);
      return ER_FAILED;
    }

  head = heap_hdr->estimates.head_second_best;

  heap_hdr->estimates.num_second_best--;
  /* NOTE(review): a line appears to have been lost here during extraction
   * (numbering jumps 3051->3053); upstream it advances head_second_best to
   * the next ring index -- confirm against source. */

  /* If both head and tail refer to the same index, the number of second best hints is 0. */
  assert ((heap_hdr->estimates.tail_second_best >= heap_hdr->estimates.head_second_best)
	  ? ((heap_hdr->estimates.tail_second_best - heap_hdr->estimates.head_second_best)
	     == heap_hdr->estimates.num_second_best)
	  : ((10 + heap_hdr->estimates.tail_second_best - heap_hdr->estimates.head_second_best)
	     == heap_hdr->estimates.num_second_best));

  /* Return the hint that was at the (old) head position. */
  *vpid = heap_hdr->estimates.second_best[head];
  return NO_ERROR;
}
3065 
3066 #if defined(ENABLE_UNUSED_FUNCTION)
3067 /*
3068  * heap_stats_quick_num_fit_in_bestspace () - Guess the number of unit_size entries that
3069  * can fit in best space
3070  * return: number of units
3071  * bestspace(in): Array of best pages along with their freespace
3072  * (The freespace fields may be updated as a SIDE EFFECT)
3073  * num_entries(in): Number of estimated entries in best space.
3074  * unit_size(in): Units of this size
3075  * unfill_space(in): Unfill space on the pages
3076  *
3077  * Note: Find the number of units of "unit_size" that can fit in
 * current bestspace.
3079  */
3080 static int
3081 heap_stats_quick_num_fit_in_bestspace (HEAP_BESTSPACE * bestspace, int num_entries, int unit_size, int unfill_space)
3082 {
3083  int total_nunits = 0;
3084  int i;
3085 
3086  if (unit_size <= 0)
3087  {
3088  return ER_FAILED;
3089  }
3090 
3091  for (i = 0; i < num_entries; i++)
3092  {
3093  if ((bestspace[i].freespace - unfill_space) >= unit_size)
3094  {
3095  /*
3096  * How many min_spaces can fit in this page
3097  */
3098  total_nunits += (bestspace[i].freespace - unfill_space) / unit_size;
3099  }
3100  }
3101 
3102  return total_nunits;
3103 }
3104 #endif
3105 
3106 /*
3107  * heap_stats_find_page_in_bestspace () - Find a page within best space
3108  * statistics with the needed space
 * return: HEAP_FINDSPACE (found, not found, or error)
3110  * hfid(in): Object heap file identifier
3111  * bestspace(in): Array of best pages along with their freespace
3112  * (The freespace fields may be updated as a SIDE EFFECT)
 * idx_badspace(in/out): An index into best space with not-so-good space.
3114  * needed_space(in): The needed space.
3115  * scan_cache(in): Scan cache if any
3116  * pgptr(out): Best page with enough space or NULL
3117  *
3118  * Note: Search for a page within the best space cache which has the
3119  * needed space. The free space fields of best space cache along
3120  * with some other index information are updated (as a side
3121  * effect) as the best space cache is accessed.
3122  */
static HEAP_FINDSPACE
heap_stats_find_page_in_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_BESTSPACE * bestspace,
				   int *idx_badspace, int record_length, int needed_space, HEAP_SCANCACHE * scan_cache,
				   PGBUF_WATCHER * pg_watcher)
{
#define BEST_PAGE_SEARCH_MAX_COUNT 100

  HEAP_FINDSPACE found;
  int old_wait_msecs;
  int notfound_cnt;		/* candidates tried and rejected so far */
  HEAP_STATS_ENTRY *ent;
  HEAP_BESTSPACE best;		/* current candidate page + its free space */
  int rc;
  int idx_worstspace;
  int i, best_array_index = -1;
  bool hash_is_available;	/* in-memory best-space hash enabled? */
  bool best_hint_is_used;	/* candidate came from header hint array */

  assert (PGBUF_IS_CLEAN_WATCHER (pg_watcher));

  /*
   * If a page is busy, don't wait continue looking for other pages in our
   * statistics. This will improve some contentions on the heap at the
   * expenses of storage.
   */

  /* LK_FORCE_ZERO_WAIT doesn't set error when deadlock occurs */
  old_wait_msecs = xlogtb_reset_wait_msecs (thread_p, LK_FORCE_ZERO_WAIT);

  found = HEAP_FINDSPACE_NOTFOUND;
  notfound_cnt = 0;
  best_array_index = 0;
  hash_is_available = prm_get_integer_value (PRM_ID_HF_MAX_BESTSPACE_ENTRIES) > 0;

  while (found == HEAP_FINDSPACE_NOTFOUND)
    {
      best.freespace = -1;	/* init */
      best_hint_is_used = false;

      /* First source of candidates: the global in-memory best-space hash. */
      if (hash_is_available)
	{
	  rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex);

	  while (notfound_cnt < BEST_PAGE_SEARCH_MAX_COUNT
		 && (ent = (HEAP_STATS_ENTRY *) mht_get2 (heap_Bestspace->hfid_ht, hfid, NULL)) != NULL)
	    {
	      if (ent->best.freespace >= needed_space)
		{
		  best = ent->best;
		  assert (best.freespace > 0 && best.freespace <= PGLENGTH_MAX);
		  break;
		}

	      /* remove in memory bestspace */
	      (void) mht_rem2 (heap_Bestspace->hfid_ht, &ent->hfid, ent, NULL, NULL);
	      (void) mht_rem (heap_Bestspace->vpid_ht, &ent->best.vpid, NULL, NULL);
	      (void) heap_stats_entry_free (thread_p, ent, NULL);
	      ent = NULL;

	      heap_Bestspace->num_stats_entries--;

	      notfound_cnt++;
	    }

	  pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex);
	}

      if (best.freespace == -1)
	{
	  /* Maybe PRM_ID_HF_MAX_BESTSPACE_ENTRIES <= 0 or There is no best space in heap_Bestspace hashtable. We will
	   * use bestspace hint in heap_header. */
	  while (best_array_index < HEAP_NUM_BEST_SPACESTATS)
	    {
	      if (bestspace[best_array_index].freespace >= needed_space)
		{
		  best.vpid = bestspace[best_array_index].vpid;
		  best.freespace = bestspace[best_array_index].freespace;
		  assert (best.freespace > 0 && best.freespace <= PGLENGTH_MAX);
		  best_hint_is_used = true;
		  break;
		}
	      best_array_index++;
	    }
	}

      if (best.freespace == -1)
	{
	  break;		/* not found, exit loop */
	}

      /* If page could not be fixed, we will interrogate er_errid () to see the error type. If an error is already
       * set, the interrogation will be corrupted.
       * Make sure an error is not set.
       */
      if (er_errid () != NO_ERROR)
	{
	  if (er_errid () == ER_INTERRUPTED)
	    {
	      /* interrupt arrives at any time */
	      break;
	    }
#if defined (SERVER_MODE)
	  assert (er_errid () == ER_INTERRUPTED);
#endif /* SERVER_MODE */
	  er_clear ();
	}

      pg_watcher->pgptr = heap_scan_pb_lock_and_fetch (thread_p, &best.vpid, OLD_PAGE, X_LOCK, scan_cache, pg_watcher);
      if (pg_watcher->pgptr == NULL)
	{
	  /*
	   * Either we timeout and we want to continue in this case, or
	   * we have another kind of problem.
	   */
	  switch (er_errid ())
	    {
	    case NO_ERROR:
	      /* In case of latch-timeout in pgbuf_fix, the timeout error(ER_LK_PAGE_TIMEOUT) is not set, because lock
	       * wait time is LK_FORCE_ZERO_WAIT. So we will just continue to find another page. */
	      break;

	    case ER_INTERRUPTED:
	      found = HEAP_FINDSPACE_ERROR;
	      break;

	    default:
	      /*
	       * Something went wrong, we are unable to fetch this page.
	       */
	      if (best_hint_is_used == true)
		{
		  /* Disable this header hint so it is not retried. */
		  assert (best_array_index < HEAP_NUM_BEST_SPACESTATS);
		  bestspace[best_array_index].freespace = 0;
		}
	      else
		{
		  /* Drop the stale entry from the in-memory cache. */
		  (void) heap_stats_del_bestspace_by_vpid (thread_p, &best.vpid);
		}
	      found = HEAP_FINDSPACE_ERROR;

	      /* Do not allow unexpected errors. */
	      assert (false);
	      break;
	    }
	}
      else
	{
	  /* Page fixed: re-read the real free space (the hint may be stale). */
	  best.freespace = spage_max_space_for_new_record (thread_p, pg_watcher->pgptr);
	  if (best.freespace >= needed_space)
	    {
	      /*
	       * Decrement by only the amount space needed by the caller. Don't
	       * include the unfill factor
	       */
	      best.freespace -= record_length + heap_Slotted_overhead;
	      found = HEAP_FINDSPACE_FOUND;
	    }

	  if (hash_is_available)
	    {
	      /* Add or refresh the free space of the page */
	      (void) heap_stats_add_bestspace (thread_p, hfid, &best.vpid, best.freespace);
	    }

	  if (best_hint_is_used == true)
	    {
	      /* Propagate the refreshed free space back into the hint array. */
	      assert (VPID_EQ (&best.vpid, &(bestspace[best_array_index].vpid)));
	      assert (best_array_index < HEAP_NUM_BEST_SPACESTATS);

	      bestspace[best_array_index].freespace = best.freespace;
	    }

	  if (found != HEAP_FINDSPACE_FOUND)
	    {
	      /* Not enough room after all; release the page and keep looking. */
	      pgbuf_ordered_unfix (thread_p, pg_watcher);
	    }
	}

      if (found == HEAP_FINDSPACE_NOTFOUND)
	{
	  if (best_hint_is_used)
	    {
	      /* Increment best_array_index for next search */
	      best_array_index++;
	    }
	  else
	    {
	      notfound_cnt++;
	    }
	}
    }

  idx_worstspace = 0;
  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
    {
      /* find worst space in bestspace */
      if (bestspace[idx_worstspace].freespace > bestspace[i].freespace)
	{
	  idx_worstspace = i;
	}

      /* update bestspace of heap header page if found best page at memory hash table */
      if (best_hint_is_used == false && found == HEAP_FINDSPACE_FOUND && VPID_EQ (&best.vpid, &bestspace[i].vpid))
	{
	  bestspace[i].freespace = best.freespace;
	}
    }

  /*
   * Set the idx_badspace to the index with the smallest free space
   * which may not be accurate. This is used for future lookups (where to
   * start) into the findbest space ring.
   */
  *idx_badspace = idx_worstspace;

  /*
   * Reset back the timeout value of the transaction
   */
  (void) xlogtb_reset_wait_msecs (thread_p, old_wait_msecs);

  return found;
}
3346 
3347 /*
3348  * heap_stats_find_best_page () - Find a page with the needed space.
3349  * return: pointer to page with enough space or NULL
3350  * hfid(in): Object heap file identifier
3351  * needed_space(in): The minimal space needed
3352  * isnew_rec(in): Are we inserting a new record to the heap ?
3353  * newrec_size(in): Size of the new record
3354  * scan_cache(in/out): Scan cache used to estimate the best space pages
3355  *
3356  * Note: Find a page among the set of best pages of the heap which has
3357  * the needed space. If we do not find any page, a new page is
3358  * allocated. The heap header and the scan cache may be updated
3359  * as a side effect to reflect more accurate space on some of the
3360  * set of best pages.
3361  */
static PAGE_PTR
heap_stats_find_best_page (THREAD_ENTRY * thread_p, const HFID * hfid, int needed_space, bool isnew_rec,
			   int newrec_size, HEAP_SCANCACHE * scan_cache, PGBUF_WATCHER * pg_watcher)
{
  VPID vpid;			/* Volume and page identifiers */
  LOG_DATA_ADDR addr_hdr;	/* Address of logging data */
  RECDES hdr_recdes;		/* Record descriptor to point to space statistics */
  HEAP_HDR_STATS *heap_hdr;	/* Heap header */
  VPID *hdr_vpidp;
  int total_space;
  int try_find, try_sync;
  int num_pages_found;
  float other_high_best_ratio;
  PGBUF_WATCHER hdr_page_watcher;
  int error_code = NO_ERROR;

  /*
   * Try to use the space cache for as much information as possible to avoid
   * fetching and updating the header page a lot.
   */

  assert (scan_cache == NULL || scan_cache->cache_last_fix_page == false || scan_cache->page_watcher.pgptr == NULL);
  PGBUF_INIT_WATCHER (&hdr_page_watcher, PGBUF_ORDERED_HEAP_HDR, hfid);

  /*
   * Get the heap header in exclusive mode since it is going to be changed.
   *
   * Note: to avoid any possibilities of deadlocks, I should not have any locks
   * on the heap at this moment.
   * That is, we must assume that locking the header of the heap in
   * exclusive mode, the rest of the heap is locked.
   */

  vpid.volid = hfid->vfid.volid;
  vpid.pageid = hfid->hpgid;

  addr_hdr.vfid = &hfid->vfid;
  /* NOTE(review): a line appears to have been lost here during extraction
   * (numbering jumps 3398->3400); upstream it is likely the addr_hdr.offset
   * assignment -- confirm against source. */

  error_code = pgbuf_ordered_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &hdr_page_watcher);
  if (error_code != NO_ERROR)
    {
      /* something went wrong. Unable to fetch header page */
      ASSERT_ERROR ();
      return NULL;
    }
  assert (hdr_page_watcher.pgptr != NULL);

  (void) pgbuf_check_page_ptype (thread_p, hdr_page_watcher.pgptr, PAGE_HEAP);

  if (spage_get_record (thread_p, hdr_page_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, PEEK) != S_SUCCESS)
    {
      assert (false);
      pgbuf_ordered_unfix (thread_p, &hdr_page_watcher);
      return NULL;
    }

  heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data;

  /* Keep the rough record/page estimates in step with the new record. */
  if (isnew_rec == true)
    {
      heap_hdr->estimates.num_recs += 1;
      if (newrec_size > DB_PAGESIZE)
	{
	  heap_hdr->estimates.num_pages += CEIL_PTVDIV (newrec_size, DB_PAGESIZE);
	}
    }
  heap_hdr->estimates.recs_sumlen += (float) newrec_size;

  assert (!heap_is_big_length (needed_space));
  /* Take into consideration the unfill factor for pages with objects */
  total_space = needed_space + heap_Slotted_overhead + heap_hdr->unfill_space;
  if (heap_is_big_length (total_space))
    {
      total_space = needed_space + heap_Slotted_overhead;
    }

  try_find = 0;
  while (true)
    {
      try_find++;
      assert (pg_watcher->pgptr == NULL);
      if (heap_stats_find_page_in_bestspace (thread_p, hfid, heap_hdr->estimates.best, &(heap_hdr->estimates.head),
					     needed_space, total_space, scan_cache, pg_watcher) == HEAP_FINDSPACE_ERROR)
	{
	  ASSERT_ERROR ();
	  assert (pg_watcher->pgptr == NULL);
	  pgbuf_ordered_unfix (thread_p, &hdr_page_watcher);
	  return NULL;
	}
      if (pg_watcher->pgptr != NULL)
	{
	  /* found the page */
	  break;
	}

      assert (hdr_page_watcher.page_was_unfixed == false);

      if (heap_hdr->estimates.num_other_high_best <= 0 || heap_hdr->estimates.num_pages <= 0)
	{
	  assert (heap_hdr->estimates.num_pages > 0);
	  other_high_best_ratio = 0;
	}
      else
	{
	  other_high_best_ratio =
	    (float) heap_hdr->estimates.num_other_high_best / (float) heap_hdr->estimates.num_pages;
	}

      if (try_find >= 2 || other_high_best_ratio < HEAP_BESTSPACE_SYNC_THRESHOLD)
	{
	  /* We stop to find free pages if: (1) we have tried to do it twice (2) it is first trying but we have no
	   * hints Regarding (2), we will find free pages by heap_stats_sync_bestspace only if we know that a free page
	   * exists somewhere. and (num_other_high_best/total page) > HEAP_BESTSPACE_SYNC_THRESHOLD.
	   * num_other_high_best means the number of free pages existing somewhere in the heap file. */
	  break;
	}

      /*
       * The followings will try to find free pages and fill best hints with them.
       */

      if (scan_cache != NULL)
	{
	  assert (HFID_EQ (hfid, &scan_cache->node.hfid));
	  assert (scan_cache->file_type != FILE_UNKNOWN_TYPE);
	}

      hdr_vpidp = pgbuf_get_vpid_ptr (hdr_page_watcher.pgptr);

      try_sync = 0;
      do
	{
	  try_sync++;
	  /* NOTE(review): the opening 'er_log_debug (ARG_FILE_LINE,' line of
	   * this debug-logging call appears to have been lost during
	   * extraction (numbering jumps 3495->3497); the argument lines
	   * below are kept verbatim -- confirm against source. */
		       "heap_stats_find_best_page: call heap_stats_sync_bestspace() "
		       "hfid { vfid { fileid %d volid %d } hpgid %d } hdr_vpid { pageid %d volid %d } "
		       "scan_all %d ", hfid->vfid.fileid, hfid->vfid.volid, hfid->hpgid, hdr_vpidp->pageid,
		       hdr_vpidp->volid, 0);

	  num_pages_found = heap_stats_sync_bestspace (thread_p, hfid, heap_hdr, hdr_vpidp, false, true);
	  if (num_pages_found < 0)
	    {
	      pgbuf_ordered_unfix (thread_p, &hdr_page_watcher);
	      ASSERT_ERROR ();
	      return NULL;
	    }
	}
      while (num_pages_found == 0 && try_sync <= 2);

      /* If we cannot find free pages, give up. */
      if (num_pages_found <= 0)
	{
	  break;
	}
    }

  if (pg_watcher->pgptr == NULL)
    {
      /*
       * None of the best pages has the needed space, allocate a new page.
       * Set the head to the index with the smallest free space, which may not
       * be accurate.
       */
      if (heap_vpid_alloc (thread_p, hfid, hdr_page_watcher.pgptr, heap_hdr, scan_cache, pg_watcher) != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  pgbuf_ordered_unfix (thread_p, &hdr_page_watcher);
	  return NULL;
	}
      assert (pg_watcher->pgptr != NULL || er_errid () == ER_INTERRUPTED
      /* NOTE(review): the continuation/closing line of this assert appears
       * to have been lost during extraction (numbering jumps 3532->3534) --
       * confirm against source. */
    }

  /* Statistics changes on the header are deliberately not logged. */
  addr_hdr.pgptr = hdr_page_watcher.pgptr;
  log_skip_logging (thread_p, &addr_hdr);
  pgbuf_ordered_set_dirty_and_free (thread_p, &hdr_page_watcher);

  return pg_watcher->pgptr;
}
3542 
3543 /*
3544  * heap_stats_sync_bestspace () - Synchronize the statistics of best space
3545  * return: the number of pages found
3546  * hfid(in): Heap file identifier
3547  * heap_hdr(in): Heap header (Heap header page should be acquired in
3548  * exclusive mode)
3549  * hdr_vpid(in):
3550  * scan_all(in): Scan the whole heap or stop after HEAP_NUM_BEST_SPACESTATS
3551  * best pages have been found.
3552  * can_cycle(in): True, it allows to go back to beginning of the heap.
3553  * FALSE, don't go back to beginning of the heap. FALSE is used
3554  * when it is known that there is not free space at the
3555  * beginning of heap. For example, it can be used when we
 * pre-allocate pages.
3557  *
3558  * Note: Synchronize for best space, so that we can reuse heap space as
3559  * much as possible.
3560  *
3561  * Note: This function does not do any logging.
3562  */
static int
heap_stats_sync_bestspace (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_STATS * heap_hdr, VPID * hdr_vpid,
			   bool scan_all, bool can_cycle)
{
  int i, best, num_high_best, num_other_best, start_pos;
  /* NOTE(review): a declaration line appears to have been lost here during
   * extraction (numbering jumps 3567->3569); 'vpid' is used below without a
   * visible declaration -- confirm against source. */
  VPID start_vpid = { NULL_PAGEID, NULL_VOLID };
  VPID next_vpid = { NULL_PAGEID, NULL_VOLID };
  VPID stopat_vpid = { NULL_PAGEID, NULL_VOLID };
  int num_pages = 0;
  int num_recs = 0;
  float recs_sumlen = 0.0;
  int free_space = 0;
  int min_freespace;
  int ret = NO_ERROR;
  int npages = 0, nrecords = 0, rec_length;
  int num_iterations = 0, max_iterations;
  HEAP_BESTSPACE *best_pages_hint_p;
  bool iterate_all = false;
  bool search_all = false;
  PGBUF_WATCHER pg_watcher;
  PGBUF_WATCHER old_pg_watcher;
#if defined (CUBRID_DEBUG)
  TSC_TICKS start_tick, end_tick;
  TSCTIMEVAL tv_diff;

  float elapsed;

  tsc_getticks (&start_tick);
#endif /* CUBRID_DEBUG */

  /* NOTE(review): several lines appear to have been lost here during
   * extraction (numbering jumps 3593->3597); upstream they are most likely
   * the PGBUF_INIT_WATCHER initialization of pg_watcher/old_pg_watcher --
   * confirm against source. */

  min_freespace = heap_stats_get_min_freespace (heap_hdr);

  best = 0;
  start_pos = -1;
  num_high_best = num_other_best = 0;

  if (scan_all != true)
    {
      /* NOTE(review): the 'if' guard of this block appears to have been lost
       * during extraction (numbering jumps 3605->3607); an 'else' follows,
       * so the code as visible is syntactically incomplete. Upstream the
       * guard selects the full-search path -- confirm against source. */
	{
	  search_all = true;
	  start_pos = -1;
	  next_vpid = heap_hdr->estimates.full_search_vpid;
	}
      else
	{
	  if (heap_hdr->estimates.num_high_best > 0)
	    {
	      /* Use recently inserted one first. */
	      start_pos = HEAP_STATS_PREV_BEST_INDEX (heap_hdr->estimates.head);
	      for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
		{
		  if (!VPID_ISNULL (&heap_hdr->estimates.best[start_pos].vpid))
		    {
		      next_vpid = heap_hdr->estimates.best[start_pos].vpid;
		      start_vpid = next_vpid;
		      break;
		    }

		  start_pos = HEAP_STATS_PREV_BEST_INDEX (start_pos);
		}
	    }
	  else
	    {
	      /* If there are hint pages in second best array, we will try to use it first. Otherwise, we will search
	       * all pages in the file. */
	      if (heap_hdr->estimates.num_second_best > 0)
		{
		  if (heap_stats_get_second_best (heap_hdr, &next_vpid) != NO_ERROR)
		    {
		      /* This should not be happened. */
		      assert (false);
		      search_all = true;
		    }
		}
	      else
		{
		  search_all = true;
		}

	      if (search_all == true)
		{
		  assert (VPID_ISNULL (&next_vpid));
		  next_vpid = heap_hdr->estimates.full_search_vpid;
		}

	      start_vpid = next_vpid;
	      start_pos = -1;
	    }
	}

      if (can_cycle == true)
	{
	  stopat_vpid = next_vpid;
	}
    }

  if (VPID_ISNULL (&next_vpid))
    {
      /*
       * Start from beginning of heap due to lack of statistics.
       */
      next_vpid.volid = hfid->vfid.volid;
      next_vpid.pageid = hfid->hpgid;
      start_vpid = next_vpid;
      start_pos = -1;
      can_cycle = false;
    }

  /*
   * Note that we do not put any locks on the pages that we are scanning
   * since the best space array is only used for hints, and it is OK
   * if it is a little bit wrong.
   */
  best_pages_hint_p = heap_hdr->estimates.best;

  num_iterations = 0;
  max_iterations = MIN ((int) (heap_hdr->estimates.num_pages * 0.2), heap_Find_best_page_limit);
  max_iterations = MAX (max_iterations, HEAP_NUM_BEST_SPACESTATS);

  while (!VPID_ISNULL (&next_vpid) || can_cycle == true)
    {
      if (can_cycle == true && VPID_ISNULL (&next_vpid))
	{
	  /*
	   * Go back to beginning of heap looking for good pages with a lot of
	   * free space
	   */
	  next_vpid.volid = hfid->vfid.volid;
	  next_vpid.pageid = hfid->hpgid;
	  can_cycle = false;
	}

      while ((scan_all == true || num_high_best < HEAP_NUM_BEST_SPACESTATS) && !VPID_ISNULL (&next_vpid)
	     && (can_cycle == true || !VPID_EQ (&next_vpid, &stopat_vpid)))
	{
	  if (scan_all == false)
	    {
	      if (++num_iterations > max_iterations)
		{
		  /* NOTE(review): the opening 'er_log_debug (ARG_FILE_LINE,'
		   * line of this debug call appears to have been lost during
		   * extraction (numbering jumps 3707->3709); the argument
		   * lines below are kept verbatim. */
				"heap_stats_sync_bestspace: num_iterations %d best %d "
				"next_vpid { pageid %d volid %d }\n", num_iterations, num_high_best, next_vpid.pageid,
				next_vpid.volid);

		  /* TODO: Do we really need to update the last scanned */
		  /* in case we found less than 10 pages. */
		  /* It is obvious we didn't find any pages. */
		  if (start_pos != -1 && num_high_best == 0)
		    {
		      /* Delete a starting VPID. */
		      VPID_SET_NULL (&best_pages_hint_p[start_pos].vpid);
		      best_pages_hint_p[start_pos].freespace = 0;

		      heap_hdr->estimates.num_high_best--;
		    }
		  iterate_all = true;
		  break;
		}
	    }

	  vpid = next_vpid;
	  ret = pgbuf_ordered_fix (thread_p, &vpid, OLD_PAGE_PREVENT_DEALLOC, PGBUF_LATCH_READ, &pg_watcher);
	  if (ret != NO_ERROR)
	    {
	      break;
	    }
	  (void) pgbuf_check_page_ptype (thread_p, pg_watcher.pgptr, PAGE_HEAP);

	  /* Keep the previous page fixed until the next one is fixed, then
	   * release it (watcher hand-over pattern). */
	  if (old_pg_watcher.pgptr != NULL)
	    {
	      pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
	    }

	  ret = heap_vpid_next (thread_p, hfid, pg_watcher.pgptr, &next_vpid);
	  if (ret != NO_ERROR)
	    {
	      assert (false);
	      pgbuf_ordered_unfix (thread_p, &pg_watcher);
	      break;
	    }
	  if (search_all)
	    {
	      /* Save the last position to be searched next time. */
	      heap_hdr->estimates.full_search_vpid = next_vpid;
	    }

	  spage_collect_statistics (pg_watcher.pgptr, &npages, &nrecords, &rec_length);

	  num_pages += npages;
	  num_recs += nrecords;
	  recs_sumlen += rec_length;

	  free_space = spage_max_space_for_new_record (thread_p, pg_watcher.pgptr);

	  if (free_space >= min_freespace && free_space > HEAP_DROP_FREE_SPACE)
	    {
	      /* NOTE(review): the 'if' guard of this block appears to have
	       * been lost during extraction (numbering jumps 3764->3766);
	       * upstream it likely checks that the in-memory best-space
	       * cache is enabled -- confirm against source. */
		{
		  (void) heap_stats_add_bestspace (thread_p, hfid, &vpid, free_space);
		}

	      if (num_high_best < HEAP_NUM_BEST_SPACESTATS)
		{
		  best_pages_hint_p[best].vpid = vpid;
		  best_pages_hint_p[best].freespace = free_space;

		  best = HEAP_STATS_NEXT_BEST_INDEX (best);
		  num_high_best++;
		}
	      else
		{
		  /* Ring is full; count the page as an "other high best". */
		  num_other_best++;
		}
	    }

	  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
	}

      assert (pg_watcher.pgptr == NULL);
      if (old_pg_watcher.pgptr != NULL)
	{
	  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
	}

      if (scan_all == false
	  && (iterate_all == true || num_high_best == HEAP_NUM_BEST_SPACESTATS
	      || (can_cycle == false && VPID_EQ (&next_vpid, &stopat_vpid))))
	{
	  break;
	}

      VPID_SET_NULL (&next_vpid);
    }

  /* NOTE(review): the opening 'er_log_debug (ARG_FILE_LINE,' line of this
   * debug call appears to have been lost during extraction (numbering jumps
   * 3802->3804); the argument lines below are kept verbatim. */
		"heap_stats_sync_bestspace: scans from {%d|%d} to {%d|%d}, num_iterations(%d) "
		"max_iterations(%d) num_high_best(%d)\n", start_vpid.volid, start_vpid.pageid, vpid.volid, vpid.pageid,
		num_iterations, max_iterations, num_high_best);

  /* If we have scanned all pages, we should update all statistics even if we have not found any hints. This logic is
   * used to handle "select count(*) from table". */
  if (scan_all == false && num_high_best == 0 && heap_hdr->estimates.num_second_best == 0)
    {
      return 0;
    }

  if (num_high_best < HEAP_NUM_BEST_SPACESTATS)
    {
      /* Clear the unused tail of the best-hint ring. */
      for (i = best; i < HEAP_NUM_BEST_SPACESTATS; i++)
	{
	  VPID_SET_NULL (&best_pages_hint_p[i].vpid);
	  best_pages_hint_p[i].freespace = 0;
	}
    }

  heap_hdr->estimates.head = best;	/* reinit */
  heap_hdr->estimates.num_high_best = num_high_best;
  assert (heap_hdr->estimates.head >= 0 && heap_hdr->estimates.head < HEAP_NUM_BEST_SPACESTATS
  /* NOTE(review): the continuation/closing line of this assert appears to
   * have been lost during extraction (numbering jumps 3826->3828) --
   * confirm against source. */

  if (scan_all == true || heap_hdr->estimates.num_pages <= num_pages)
    {
      /*
       * We scan the whole heap.
       * Reset its statistics with new found statistics
       */
      heap_hdr->estimates.num_other_high_best = num_other_best;
      heap_hdr->estimates.num_pages = num_pages;
      heap_hdr->estimates.num_recs = num_recs;
      heap_hdr->estimates.recs_sumlen = recs_sumlen;
    }
  else
    {
      /*
       * We did not scan the whole heap.
       * We reset only some of its statistics since we do not have any idea
       * which ones are better the ones that are currently recorded or the ones
       * just found.
       */
      heap_hdr->estimates.num_other_high_best -= heap_hdr->estimates.num_high_best;

      if (heap_hdr->estimates.num_other_high_best < num_other_best)
	{
	  heap_hdr->estimates.num_other_high_best = num_other_best;
	}

      if (num_recs > heap_hdr->estimates.num_recs || recs_sumlen > heap_hdr->estimates.recs_sumlen)
	{
	  heap_hdr->estimates.num_pages = num_pages;
	  heap_hdr->estimates.num_recs = num_recs;
	  heap_hdr->estimates.recs_sumlen = recs_sumlen;
	}
    }

#if defined (CUBRID_DEBUG)
  tsc_getticks (&end_tick);
  tsc_elapsed_time_usec (&tv_diff, end_tick, start_tick);
  elapsed = (float) tv_diff.tv_sec * 1000000;
  elapsed += (float) tv_diff.tv_usec;
  elapsed /= 1000000;

  er_log_debug (ARG_FILE_LINE, "heap_stats_sync_bestspace: elapsed time %.6f", elapsed);
#endif /* CUBRID_DEBUG */

  return num_high_best;
}
3875 
3876 /*
3877  * heap_get_last_page () - Get the last page pointer.
3878  * return: error code
3879  * hfid(in): Object heap file identifier
3880  * heap_hdr(in): The heap header structure
3881  * scan_cache(in): Scan cache
3882  * last_vpid(out): VPID of the last page
3883  *
3884  * Note: The last vpid is saved on heap header. We log it and should be the right VPID.
3885  */
3886 static int
3887 heap_get_last_page (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_STATS * heap_hdr, HEAP_SCANCACHE * scan_cache,
3888  VPID * last_vpid, PGBUF_WATCHER * pg_watcher)
3889 {
3890  int error_code = NO_ERROR;
3891 
3892  assert (pg_watcher != NULL);
3893  assert (last_vpid != NULL);
3894  assert (!VPID_ISNULL (&heap_hdr->estimates.last_vpid));
3895 
3896  *last_vpid = heap_hdr->estimates.last_vpid;
3897  pg_watcher->pgptr = heap_scan_pb_lock_and_fetch (thread_p, last_vpid, OLD_PAGE, X_LOCK, scan_cache, pg_watcher);
3898  if (pg_watcher->pgptr == NULL)
3899  {
3900  ASSERT_ERROR_AND_SET (error_code);
3901  return error_code;
3902  }
3903 
3904 #if !defined (NDEBUG)
3905  {
3906  RECDES recdes;
3907  HEAP_CHAIN *chain;
3908  if (spage_get_record (thread_p, pg_watcher->pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
3909  {
3910  assert (false);
3911  pgbuf_ordered_unfix (thread_p, pg_watcher);
3912  return ER_FAILED;
3913  }
3914  chain = (HEAP_CHAIN *) recdes.data;
3915  assert (VPID_ISNULL (&chain->next_vpid));
3916  }
3917 #endif /* !NDEBUG */
3918 
3919  return NO_ERROR;
3920 }
3921 
3922 /*
3923  * heap_get_last_vpid () - Get last heap page VPID from heap file header
3924  *
3925  * return : Error code
3926  * thread_p (in) : Thread entry
3927  * hfid (in) : Heap file identifier
3928  * last_vpid (out) : Last heap page VPID
3929  */
3930 STATIC_INLINE int
3931 heap_get_last_vpid (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * last_vpid)
3932 {
3933  PGBUF_WATCHER watcher_heap_header;
3934  VPID vpid_heap_header;
3935  HEAP_HDR_STATS *hdr_stats = NULL;
3936 
3937  int error_code = NO_ERROR;
3938 
3939  PGBUF_INIT_WATCHER (&watcher_heap_header, PGBUF_ORDERED_HEAP_HDR, hfid);
3940 
3941  VPID_SET_NULL (last_vpid);
3942 
3943  vpid_heap_header.volid = hfid->vfid.volid;
3944  vpid_heap_header.pageid = hfid->hpgid;
3945  error_code = pgbuf_ordered_fix (thread_p, &vpid_heap_header, OLD_PAGE, PGBUF_LATCH_READ, &watcher_heap_header);
3946  if (error_code != NO_ERROR)
3947  {
3948  ASSERT_ERROR ();
3949  return error_code;
3950  }
3951 
3952  hdr_stats = heap_get_header_stats_ptr (thread_p, watcher_heap_header.pgptr);
3953  if (hdr_stats == NULL)
3954  {
3955  assert_release (false);
3956  pgbuf_ordered_unfix (thread_p, &watcher_heap_header);
3957  return ER_FAILED;
3958  }
3959  *last_vpid = hdr_stats->estimates.last_vpid;
3960  pgbuf_ordered_unfix (thread_p, &watcher_heap_header);
3961  return NO_ERROR;
3962 }
3963 
3964 /*
3965  * heap_get_header_stats_ptr () - Get pointer to heap header statistics.
3966  *
3967  * return : Pointer to heap header statistics
3968  * page_header (in) : Heap header page
3969  */
3972 {
3973  RECDES recdes;
3974 
3975  if (spage_get_record (thread_p, page_header, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
3976  {
3977  assert_release (false);
3978  return NULL;
3979  }
3980  return (HEAP_HDR_STATS *) recdes.data;
3981 }
3982 
3983 /*
3984  * heap_copy_header_stats () - Copy heap header statistics
3985  *
3986  * return : Error code
3987  * page_header (in) : Heap header page
3988  * header_stats (out) : Heap header statistics
3989  */
3990 STATIC_INLINE int
3991 heap_copy_header_stats (THREAD_ENTRY * thread_p, PAGE_PTR page_header, HEAP_HDR_STATS * header_stats)
3992 {
3993  RECDES recdes;
3994 
3995  recdes.data = (char *) header_stats;
3996  recdes.area_size = sizeof (*header_stats);
3997  if (spage_get_record (thread_p, page_header, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, COPY) != S_SUCCESS)
3998  {
3999  assert_release (false);
4000  return ER_FAILED;
4001  }
4002  return NO_ERROR;
4003 }
4004 
4005 /*
4006  * heap_get_chain_ptr () - Get pointer to chain in heap page
4007  *
4008  * return : Pointer to chain in heap page
4009  * page_heap (in) : Heap page
4010  */
4013 {
4014  RECDES recdes;
4015 
4016  if (spage_get_record (thread_p, page_heap, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
4017  {
4018  assert_release (false);
4019  return NULL;
4020  }
4021  return (HEAP_CHAIN *) recdes.data;
4022 }
4023 
4024 /*
4025  * heap_copy_chain () - Copy chain from heap page
4026  *
4027  * return : Error code
4028  * page_heap (in) : Heap page
4029  * chain (out) : Heap chain
4030  */
4031 STATIC_INLINE int
4032 heap_copy_chain (THREAD_ENTRY * thread_p, PAGE_PTR page_heap, HEAP_CHAIN * chain)
4033 {
4034  RECDES recdes;
4035 
4036  if (spage_get_record (thread_p, page_heap, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
4037  {
4038  assert_release (false);
4039  return ER_FAILED;
4040  }
4041  assert (recdes.length >= (int) sizeof (*chain));
4042  memcpy (chain, recdes.data, sizeof (*chain));
4043  return NO_ERROR;
4044 }
4045 
4046 /*
4047  * heap_vpid_init_new () - FILE_INIT_PAGE_FUNC for heap non-header pages
4048  *
4049  * return : Error code
4050  * thread_p (in) : Thread entry
4051  * page (in) : New heap file page
4052  * args (in) : HEAP_CHAIN *
4053  */
4054 static int
4055 heap_vpid_init_new (THREAD_ENTRY * thread_p, PAGE_PTR page, void *args)
4056 {
4058  HEAP_CHAIN chain;
4059  RECDES recdes;
4060  INT16 slotid;
4061  int sp_success;
4062 
4063  assert (page != NULL);
4064  assert (args != NULL);
4065 
4066  chain = *(HEAP_CHAIN *) args; /* get chain from args. it is already initialized */
4067 
4068  /* initialize new page. */
4069  addr.pgptr = page;
4070  pgbuf_set_page_ptype (thread_p, addr.pgptr, PAGE_HEAP);
4071 
4072  /* initialize the page and chain it with the previous last allocated page */
4073  spage_initialize (thread_p, addr.pgptr, heap_get_spage_type (), HEAP_MAX_ALIGN, SAFEGUARD_RVSPACE);
4074 
4075  recdes.area_size = recdes.length = sizeof (chain);
4076  recdes.type = REC_HOME;
4077  recdes.data = (char *) &chain;
4078 
4079  sp_success = spage_insert (thread_p, addr.pgptr, &recdes, &slotid);
4080  if (sp_success != SP_SUCCESS || slotid != HEAP_HEADER_AND_CHAIN_SLOTID)
4081  {
4082  assert (false);
4083 
4084  /* initialization has failed !! */
4085  if (sp_success != SP_SUCCESS)
4086  {
4088  }
4089  return ER_FAILED;
4090  }
4091 
4093  pgbuf_set_dirty (thread_p, addr.pgptr, DONT_FREE);
4094  return NO_ERROR;
4095 }
4096 
4097 /*
4098  * heap_vpid_alloc () - allocate, fetch, and initialize a new page
4099  * return: error code
4100  * hfid(in): Object heap file identifier
4101  * hdr_pgptr(in): The heap page header
4102  * heap_hdr(in): The heap header structure
4103  * scan_cache(in): Scan cache
4104  * new_pg_watcher(out): watcher for new page.
4105  *
4106  * Note: Allocate and initialize a new heap page. The heap header is
4107  * updated to reflect a newly allocated best space page and
4108  * the set of best space pages information may be updated to
4109  * include the previous best1 space page.
4110  */
  /* NOTE(review): this block was recovered from a doxygen scrape; original
   * lines 4129, 4153, 4164, 4208 and 4222 were lost in extraction. Hedged
   * notes mark each gap below — confirm every one against upstream. */
4111 static int
4112 heap_vpid_alloc (THREAD_ENTRY * thread_p, const HFID * hfid, PAGE_PTR hdr_pgptr, HEAP_HDR_STATS * heap_hdr,
4113  HEAP_SCANCACHE * scan_cache, PGBUF_WATCHER * new_pg_watcher)
4114 {
4115  VPID vpid; /* Volume and page identifiers */
4116  LOG_DATA_ADDR addr = LOG_DATA_ADDR_INITIALIZER; /* Address of logging data */
4117  int best;
4118  VPID last_vpid;
4119  PGBUF_WATCHER last_pg_watcher;
4120  HEAP_CHAIN new_page_chain;
4121  HEAP_HDR_STATS heap_hdr_prev = *heap_hdr; /* snapshot of stats for the undo image logged at the end */
4122 
4123  int error_code = NO_ERROR;
4124 
4125  assert (PGBUF_IS_CLEAN_WATCHER (new_pg_watcher));
4126 
4127  PGBUF_INIT_WATCHER (&last_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
4128  addr.vfid = &hfid->vfid;
  /* NOTE(review): orig. line 4129 elided in extraction (presumably
   * "addr.pgptr = NULL;") — confirm against upstream. */
4130 
  /* Fetch and X-latch the current last page; the new page is linked after it. */
4131  error_code = heap_get_last_page (thread_p, hfid, heap_hdr, scan_cache, &last_vpid, &last_pg_watcher);
4132  if (error_code != NO_ERROR)
4133  {
4134  ASSERT_ERROR ();
4135  return error_code;
4136  }
4137  if (last_pg_watcher.pgptr == NULL)
4138  {
4139  /* something went wrong, return error */
4140  assert_release (false);
4141  return ER_FAILED;
4142  }
4143  assert (!VPID_ISNULL (&last_vpid));
4144 
  /* Allocation + chaining are made atomic under a system operation. */
4145  log_sysop_start (thread_p);
4146 
4147  /* init chain for new page */
4148  new_page_chain.class_oid = heap_hdr->class_oid;
4149  new_page_chain.prev_vpid = last_vpid;
4150  VPID_SET_NULL (&new_page_chain.next_vpid);
4151  new_page_chain.max_mvccid = MVCCID_NULL;
4152  new_page_chain.flags = 0;
  /* NOTE(review): orig. line 4153 elided in extraction (possibly a vacuum
   * status initialization on the new chain) — confirm against upstream. */
4154 
4155  /* allocate new page and initialize it */
4156  error_code = file_alloc (thread_p, &hfid->vfid, heap_vpid_init_new, &new_page_chain, &vpid, NULL);
4157  if (error_code != NO_ERROR)
4158  {
4159  ASSERT_ERROR ();
4160  goto error;
4161  }
4162 
4163  /* add link from previous last page */
  /* NOTE(review): orig. line 4164 elided in extraction — confirm against
   * upstream. */
4165 
4166  if (last_pg_watcher.pgptr == hdr_pgptr)
4167  {
  /* previous last page is the header page itself: the next link lives in the
   * heap header record and is covered by the RVHF_STATS log below */
4168  heap_hdr->next_vpid = vpid;
4169  /* will be logged later */
4170  }
4171  else
4172  {
4173  HEAP_CHAIN *chain, chain_prev;
4174 
4175  /* get chain */
4176  chain = heap_get_chain_ptr (thread_p, last_pg_watcher.pgptr);
4177  if (chain == NULL)
4178  {
4179  assert_release (false);
4180  error_code = ER_FAILED;
4181  goto error;
4182  }
4183  /* update chain */
4184  /* save old chain for logging */
4185  chain_prev = *chain;
4186  /* change next link */
4187  chain->next_vpid = vpid;
4188 
4189  /* log change */
4190  addr.pgptr = last_pg_watcher.pgptr;
4191  log_append_undoredo_data (thread_p, RVHF_CHAIN, &addr, sizeof (HEAP_CHAIN), sizeof (HEAP_CHAIN), &chain_prev,
4192  chain);
4193  pgbuf_set_dirty (thread_p, addr.pgptr, DONT_FREE);
4194  }
4195 
4196  pgbuf_ordered_unfix (thread_p, &last_pg_watcher);
4197 
4198  /* now update header statistics for best1 space page. the changes to the statistics are not logged. */
4199  /* last page hint */
4200  heap_hdr->estimates.last_vpid = vpid;
4201  heap_hdr->estimates.num_pages++;
4202 
  /* Claim the current head slot of the circular best-space array for the new
   * page; the previous occupant may be demoted to the second-best list. */
4203  best = heap_hdr->estimates.head;
4204  heap_hdr->estimates.head = HEAP_STATS_NEXT_BEST_INDEX (best);
4205  if (VPID_ISNULL (&heap_hdr->estimates.best[best].vpid))
4206  {
4207  heap_hdr->estimates.num_high_best++;
  /* NOTE(review): orig. line 4208 elided in extraction — confirm against
   * upstream. */
4209  }
4210  else
4211  {
4212  if (heap_hdr->estimates.best[best].freespace > HEAP_DROP_FREE_SPACE)
4213  {
4214  heap_hdr->estimates.num_other_high_best++;
4215  heap_stats_put_second_best (heap_hdr, &heap_hdr->estimates.best[best].vpid);
4216  }
4217  }
4218 
4219  heap_hdr->estimates.best[best].vpid = vpid;
4220  heap_hdr->estimates.best[best].freespace = DB_PAGESIZE;
4221 
  /* NOTE(review): orig. line 4222 — the condition guarding the block below —
   * was elided in extraction (likely a check that the best-space cache is
   * enabled/sized via a system parameter) — confirm against upstream. */
4223  {
4224  (void) heap_stats_add_bestspace (thread_p, hfid, &vpid, heap_hdr->estimates.best[best].freespace);
4225  }
4226 
4227  /* we really have nothing to lose from logging stats here and also it is good to have a certain last VPID. */
4228  addr.pgptr = hdr_pgptr;
4229  log_append_undoredo_data (thread_p, RVHF_STATS, &addr, sizeof (HEAP_HDR_STATS), sizeof (HEAP_HDR_STATS),
4230  &heap_hdr_prev, heap_hdr);
4231  log_sysop_commit (thread_p);
4232 
4233  /* fix new page */
4234  new_pg_watcher->pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, X_LOCK, scan_cache, new_pg_watcher);
4235  if (new_pg_watcher->pgptr == NULL)
4236  {
  /* the sysop already committed; only the fix of the new page failed */
4237  ASSERT_ERROR_AND_SET (error_code);
4238  return error_code;
4239  }
4240 
4241  return NO_ERROR;
4242 
4243 error:
4244  assert (error_code != NO_ERROR);
4245 
4246  if (last_pg_watcher.pgptr != NULL)
4247  {
4248  pgbuf_ordered_unfix (thread_p, &last_pg_watcher);
4249  }
4250  log_sysop_abort (thread_p);
4251 
4252  return error_code;
4253 }
4254 
4255 /*
4256  * heap_vpid_remove () - Deallocate a heap page
4257  * return: rm_vpid on success or NULL on error
4258  * hfid(in): Object heap file identifier
4259  * heap_hdr(in): The heap header stats
4260  * rm_vpid(in): Page to remove
4261  *
4262  * Note: The given page is removed from the heap. The linked list of heap
4263  * pages is updated to remove this page, and the heap header may
4264  * be updated if this page was part of the statistics.
4265  */
  /* NOTE(review): this block was recovered from a doxygen scrape; original
   * lines 4280, 4292, 4322, 4334, 4420, 4439 and 4479 were lost in
   * extraction. Hedged notes mark each gap — confirm against upstream. */
4266 static VPID *
4267 heap_vpid_remove (THREAD_ENTRY * thread_p, const HFID * hfid, HEAP_HDR_STATS * heap_hdr, VPID * rm_vpid)
4268 {
4269  RECDES rm_recdes; /* Record descriptor which holds the chain of the page to be removed */
4270  HEAP_CHAIN *rm_chain; /* Chain information of the page to be removed */
4271  VPID vpid; /* Real identifier of previous page */
4272  LOG_DATA_ADDR addr; /* Log address of previous page */
4273  RECDES recdes; /* Record descriptor to page header */
4274  HEAP_CHAIN chain; /* Chain to next and prev page */
4275  int sp_success;
4276  int i;
4277  PGBUF_WATCHER rm_pg_watcher;
4278  PGBUF_WATCHER prev_pg_watcher;
4279 
  /* NOTE(review): orig. line 4280 elided (presumably
   * PGBUF_INIT_WATCHER (&rm_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);)
   * — confirm against upstream. */
4281  PGBUF_INIT_WATCHER (&prev_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
4282 
4283  /*
4284  * Make sure that this is not the header page since the header page cannot
4285  * be removed. If the header page is removed.. the heap is gone
4286  */
4287 
4288  if (rm_vpid->pageid == hfid->hpgid && rm_vpid->volid == hfid->vfid.volid)
4289  {
4290  er_log_debug (ARG_FILE_LINE, "heap_vpid_remove: Trying to remove header page = %d|%d of heap file = %d|%d|%d",
4291  (int) rm_vpid->volid, rm_vpid->pageid, (int) hfid->vfid.volid, hfid->vfid.fileid, hfid->hpgid);
  /* NOTE(review): orig. line 4292 elided (presumably an er_set call) —
   * confirm against upstream. */
4293  goto error;
4294  }
4295 
4296  /* Get the chain record */
4297  rm_pg_watcher.pgptr = heap_scan_pb_lock_and_fetch (thread_p, rm_vpid, OLD_PAGE, X_LOCK, NULL, &rm_pg_watcher);
4298  if (rm_pg_watcher.pgptr == NULL)
4299  {
4300  /* Look like a system error. Unable to obtain chain header record */
4301  goto error;
4302  }
4303 
4304  if (spage_get_record (thread_p, rm_pg_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &rm_recdes, PEEK) != S_SUCCESS)
4305  {
4306  /* Look like a system error. Unable to obtain chain header record */
4307  goto error;
4308  }
4309 
4310  rm_chain = (HEAP_CHAIN *) rm_recdes.data;
4311 
4312  /*
4313  * UPDATE PREVIOUS PAGE
4314  *
4315  * Update chain next field of previous last page
4316  * If previous page is the heap header page, it contains a heap header
4317  * instead of a chain.
4318  */
4319 
4320  vpid = rm_chain->prev_vpid;
4321  addr.vfid = &hfid->vfid;
  /* NOTE(review): orig. line 4322 elided (presumably "addr.pgptr = NULL;";
   * the error label below reads addr.pgptr, so without this line early
   * error paths would read an uninitialized pointer) — confirm against
   * upstream. */
4323 
4324  prev_pg_watcher.pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, X_LOCK, NULL, &prev_pg_watcher);
4325  if (prev_pg_watcher.pgptr == NULL)
4326  {
4327  /* something went wrong, return */
4328  goto error;
4329  }
4330 
4331  if (rm_pg_watcher.page_was_unfixed)
4332  {
4333  /* TODO : unexpected: need to reconsider the algorithm, if this is an ordinary case */
  /* NOTE(review): orig. line 4334 elided — it was the head of a logging call
   * whose argument tail is the next line — confirm against upstream. */
4335  vpid.volid, vpid.pageid);
4336  goto error;
4337  }
4338 
4339  /*
4340  * Make sure that the page to be removed is not referenced on the heap
4341  * statistics
4342  */
4343 
4344  assert (heap_hdr != NULL);
4345 
4346  /*
4347  * We cannot break in the following loop since a best page could be
4348  * duplicated
4349  */
4350  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
4351  {
4352  if (VPID_EQ (&heap_hdr->estimates.best[i].vpid, rm_vpid))
4353  {
4354  VPID_SET_NULL (&heap_hdr->estimates.best[i].vpid);
4355  heap_hdr->estimates.best[i].freespace = 0;
4356  heap_hdr->estimates.head = i;
4357  }
4358  }
4359 
4360  if (VPID_EQ (&heap_hdr->estimates.last_vpid, rm_vpid))
4361  {
4362  /* If the page is the last page of the heap file, update the hint */
4363  heap_hdr->estimates.last_vpid = rm_chain->prev_vpid;
4364  }
4365 
4366  /*
4367  * Is previous page the header page ?
4368  */
4369  if (vpid.pageid == hfid->hpgid && vpid.volid == hfid->vfid.volid)
4370  {
4371  /*
4372  * PREVIOUS PAGE IS THE HEADER PAGE.
4373  * It contains a heap header instead of a chain record
4374  */
4375  heap_hdr->next_vpid = rm_chain->next_vpid;
4376  }
4377  else
4378  {
4379  /*
4380  * PREVIOUS PAGE IS NOT THE HEADER PAGE.
4381  * It contains a chain...
4382  * We need to make sure that there is not references to the page to delete
4383  * in the statistics of the heap header
4384  */
4385 
4386  /* NOW check the PREVIOUS page */
4387 
4388  if (spage_get_record (thread_p, prev_pg_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
4389  {
4390  /* Look like a system error. Unable to obtain header record */
4391  goto error;
4392  }
4393 
4394  /* Copy the chain record to memory.. so we can log the changes */
4395  memcpy (&chain, recdes.data, sizeof (chain));
4396 
4397  /* Modify the chain of the previous page in memory */
4398  chain.next_vpid = rm_chain->next_vpid;
4399 
4400  /* Get the chain record */
4401  recdes.area_size = recdes.length = sizeof (chain);
4402  recdes.type = REC_HOME;
4403  recdes.data = (char *) &chain;
4404 
4405  /* Log the desired changes.. and then change the header */
4406  addr.pgptr = prev_pg_watcher.pgptr;
4407  log_append_undoredo_data (thread_p, RVHF_CHAIN, &addr, sizeof (chain), sizeof (chain), recdes.data, &chain);
4408 
4409  /* Now change the record */
4410 
4411  sp_success = spage_update (thread_p, prev_pg_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes);
4412  if (sp_success != SP_SUCCESS)
4413  {
4414  /*
4415  * This look like a system error, size did not change, so why did it
4416  * fail
4417  */
4418  if (sp_success != SP_ERROR)
4419  {
  /* NOTE(review): orig. line 4420 elided (presumably an er_set call) —
   * confirm against upstream. */
4421  }
4422  goto error;
4423  }
4424 
4425  }
4426 
4427  /* Now set dirty, free and unlock the previous page */
4428  pgbuf_ordered_set_dirty_and_free (thread_p, &prev_pg_watcher);
4429 
4430  /*
4431  * UPDATE NEXT PAGE
4432  *
4433  * Update chain previous field of next page
4434  */
4435 
4436  if (!(VPID_ISNULL (&rm_chain->next_vpid)))
4437  {
4438  vpid = rm_chain->next_vpid;
  /* NOTE(review): orig. line 4439 elided — confirm against upstream. */
4440 
  /* prev_pg_watcher is reused here for the NEXT page (it was freed above) */
4441  prev_pg_watcher.pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, X_LOCK, NULL, &prev_pg_watcher);
4442  if (prev_pg_watcher.pgptr == NULL)
4443  {
4444  /* something went wrong, return */
4445  goto error;
4446  }
4447 
4448  /* Get the chain record */
4449  if (spage_get_record (thread_p, prev_pg_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
4450  {
4451  /* Look like a system error. Unable to obtain header record */
4452  goto error;
4453  }
4454 
4455  /* Copy the chain record to memory.. so we can log the changes */
4456  memcpy (&chain, recdes.data, sizeof (chain));
4457 
4458  /* Modify the chain of the next page in memory */
4459  chain.prev_vpid = rm_chain->prev_vpid;
4460 
4461  /* Log the desired changes.. and then change the header */
4462  addr.pgptr = prev_pg_watcher.pgptr;
4463  log_append_undoredo_data (thread_p, RVHF_CHAIN, &addr, sizeof (chain), sizeof (chain), recdes.data, &chain);
4464 
4465  /* Now change the record */
4466  recdes.area_size = recdes.length = sizeof (chain);
4467  recdes.type = REC_HOME;
4468  recdes.data = (char *) &chain;
4469 
4470  sp_success = spage_update (thread_p, prev_pg_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes);
4471  if (sp_success != SP_SUCCESS)
4472  {
4473  /*
4474  * This look like a system error, size did not change, so why did it
4475  * fail
4476  */
4477  if (sp_success != SP_ERROR)
4478  {
  /* NOTE(review): orig. line 4479 elided (presumably an er_set call) —
   * confirm against upstream. */
4480  }
4481  goto error;
4482  }
4483 
4484  /* Now set dirty, free and unlock the next page */
4485 
4486  pgbuf_ordered_set_dirty_and_free (thread_p, &prev_pg_watcher);
4487  }
4488 
4489  /* Free the page to be deallocated and deallocate the page */
4490  pgbuf_ordered_unfix (thread_p, &rm_pg_watcher);
4491 
4492  if (file_dealloc (thread_p, &hfid->vfid, rm_vpid, FILE_HEAP) != NO_ERROR)
4493  {
4494  ASSERT_ERROR ();
4495  goto error;
4496  }
4497 
  /* drop any cached best-space entry for the now-deallocated page */
4498  (void) heap_stats_del_bestspace_by_vpid (thread_p, rm_vpid);
4499 
4500  return rm_vpid;
4501 
4502 error:
4503  if (rm_pg_watcher.pgptr != NULL)
4504  {
4505  pgbuf_ordered_unfix (thread_p, &rm_pg_watcher);
4506  }
  /* addr.pgptr is used as "previous/next page still fixed" flag here — see
   * the NOTE at line 4322 about its initialization */
4507  if (addr.pgptr != NULL)
4508  {
4509  pgbuf_ordered_unfix (thread_p, &prev_pg_watcher);
4510  }
4511 
4512  return NULL;
4513 }
4514 
4515 /*
4516  * heap_remove_page_on_vacuum () - Remove heap page from heap file during
4517  * vacuum process. Function is trying to
4518  * be as least intrusive as possible and all
4519  * required pages are latched conditionally.
4520  * Give up on any failed operation.
4521  *
4522  * return : True if page was deallocated, false if not.
4523  * thread_p (in) : Thread entry.
4524  * page_ptr (in) : Pointer to page being deallocated.
4525  * hfid (in) : Heap file identifier.
4526  */
  /* NOTE(review): this block was recovered from a doxygen scrape. The
   * signature line (orig. 4528) and the first lines of many multi-line
   * calls (several vacuum_er_log* invocations and three
   * log_append_undoredo_data calls) were lost in extraction; the dangling
   * string/argument lines below belong to those calls. Confirm every gap
   * against upstream. The signature is presumably:
   * heap_remove_page_on_vacuum (THREAD_ENTRY * thread_p, PAGE_PTR * page_ptr, HFID * hfid)
   * (grounded in the uses of thread_p, *page_ptr and hfid below). */
4527 bool
4529 {
4530  VPID page_vpid = VPID_INITIALIZER; /* VPID of page being removed. */
4531  VPID prev_vpid = VPID_INITIALIZER; /* VPID of previous page. */
4532  VPID next_vpid = VPID_INITIALIZER; /* VPID of next page. */
4533  VPID header_vpid = VPID_INITIALIZER; /* VPID of heap header page. */
4534  HEAP_HDR_STATS heap_hdr; /* Header header & stats. */
4535  HEAP_CHAIN chain; /* Heap page header used to read and update page links. */
4536  RECDES copy_recdes; /* Record to copy header from pages. */
4537  /* Buffer used for copy record. */
4538  char copy_recdes_buffer[MAX (sizeof (HEAP_CHAIN), sizeof (HEAP_HDR_STATS)) + MAX_ALIGNMENT];
4539  RECDES update_recdes; /* Record containing updated header data. */
4540  int i = 0; /* Iterator. */
4541  bool is_system_op_started = false; /* Set to true once system operation is started. */
4542  PGBUF_WATCHER crt_watcher; /* Watcher for current page. */
4543  PGBUF_WATCHER header_watcher; /* Watcher for header page. */
4544  PGBUF_WATCHER prev_watcher; /* Watcher for previous page. */
4545  PGBUF_WATCHER next_watcher; /* Watcher for next page. */
4546 
4547  /* Assert expected arguments. */
4548  /* Page to remove must be fixed. */
4549  assert (page_ptr != NULL && *page_ptr != NULL);
4550  /* Page to remove must be empty. */
4551  assert (spage_number_of_records (*page_ptr) <= 1);
4552  /* Heap file identifier must be known. */
4553  assert (hfid != NULL && !HFID_IS_NULL (hfid));
4554 
4555  /* Get VPID of page to be removed. */
4556  pgbuf_get_vpid (*page_ptr, &page_vpid);
4557 
4558  if (page_vpid.pageid == hfid->hpgid && page_vpid.volid == hfid->vfid.volid)
4559  {
4560  /* Cannot remove heap file header page. */
4561  return false;
4562  }
4563 
4564  /* Use page watchers to do the ordered fix. */
  /* NOTE(review): orig. lines 4565, 4567 and 4568 elided (presumably
   * PGBUF_INIT_WATCHER for crt_watcher, prev_watcher and next_watcher,
   * matching the surviving header_watcher line) — confirm against
   * upstream. */
4566  PGBUF_INIT_WATCHER (&header_watcher, PGBUF_ORDERED_HEAP_HDR, hfid);
4569 
4570  /* Current page is already fixed. Just attach watcher. */
4571  pgbuf_attach_watcher (thread_p, *page_ptr, PGBUF_LATCH_WRITE, hfid, &crt_watcher);
4572 
4573  /* Header vpid. */
4574  header_vpid.volid = hfid->vfid.volid;
4575  header_vpid.pageid = hfid->hpgid;
4576 
4577  /* Fix required pages: Heap header page. Previous page (always exists). Next page (if exists). */
4578 
4579  /* Fix header page first, because it has higher priority. */
4580  if (pgbuf_ordered_fix (thread_p, &header_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &header_watcher) != NO_ERROR)
4581  {
4582  /* Give up. */
  /* elided logging-call head (orig. 4583) — argument tail follows */
4584  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid, page_vpid.pageid);
4585  goto error;
4586  }
4587  assert (header_watcher.pgptr != NULL);
4588 
4589  if (crt_watcher.page_was_unfixed)
4590  {
4591  *page_ptr = crt_watcher.pgptr; /* home was refixed */
4592  }
4593 
4594  /* Get previous and next page VPID's. */
4595  if (heap_vpid_prev (thread_p, hfid, *page_ptr, &prev_vpid) != NO_ERROR
4596  || heap_vpid_next (thread_p, hfid, *page_ptr, &next_vpid) != NO_ERROR)
4597  {
4598  /* Give up. */
  /* elided logging-call head (orig. 4599) — argument tail follows */
4600  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid, page_vpid.pageid);
4601  goto error;
4602  }
4603 
4604  /* Fix previous page if it is not same as header. */
4605  if (!VPID_ISNULL (&prev_vpid) && !VPID_EQ (&prev_vpid, &header_vpid))
4606  {
4607  if (pgbuf_ordered_fix (thread_p, &prev_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &prev_watcher) != NO_ERROR)
4608  {
4609  /* Give up. */
  /* elided logging-call head (orig. 4610) — argument tail follows */
4611  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid,
4612  page_vpid.pageid);
4613  goto error;
4614  }
4615  }
4616 
4617  /* Fix next page if current page is not last in heap file. */
4618  if (!VPID_ISNULL (&next_vpid))
4619  {
4620  if (pgbuf_ordered_fix (thread_p, &next_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &next_watcher) != NO_ERROR)
4621  {
4622  /* Give up. */
  /* elided logging-call head (orig. 4623) — argument tail follows */
4624  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid,
4625  page_vpid.pageid);
4626  goto error;
4627  }
4628  }
4629 
4630  /* All pages are fixed. */
4631 
4632  if (crt_watcher.page_was_unfixed)
4633  {
4634  *page_ptr = crt_watcher.pgptr; /* home was refixed */
4635 
4636  if (spage_number_of_records (crt_watcher.pgptr) > 1)
4637  {
4638  /* Current page has new data. It is no longer a candidate for removal. */
  /* elided logging-call head (orig. 4639) — argument tail follows */
4640  "Candidate heap page %d|%d to remove was changed and has new data.", page_vpid.volid,
4641  page_vpid.pageid);
4642  goto error;
4643  }
4644  }
4645 
4646  /* recheck the dealloc flag after all latches are acquired */
4647  if (pgbuf_has_prevent_dealloc (crt_watcher.pgptr))
4648  {
4649  /* Even though we have fixed all required pages, somebody was doing a heap scan, and already reached our page. We
4650  * cannot deallocate it. */
  /* elided logging-call head (orig. 4651) — argument tail follows */
4652  "Candidate heap page %d|%d to remove has waiters.", page_vpid.volid, page_vpid.pageid);
4653  goto error;
4654  }
4655 
4656  /* if we are here, the page should not be accessed by any active or vacuum workers. Active workers are prevented
4657  * from accessing it through heap scan, and direct references should not exist.
4658  * the function would not be called if any other vacuum workers would try to access the page. */
4659  if (pgbuf_has_any_waiters (crt_watcher.pgptr))
4660  {
4661  assert (false);
4662  vacuum_er_log_error (VACUUM_ER_LOG_HEAP, "%s", "Unexpected page waiters");
4663  goto error;
4664  }
4665  /* all good, we can deallocate the page */
4666 
4667  /* Start changes under the protection of system operation. */
4668  log_sysop_start (thread_p);
4669  is_system_op_started = true;
4670 
4671  /* Remove page from statistics in header page. */
4672  copy_recdes.data = PTR_ALIGN (copy_recdes_buffer, MAX_ALIGNMENT);
4673  copy_recdes.area_size = sizeof (heap_hdr);
4674  if (spage_get_record (thread_p, header_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &copy_recdes, COPY) != S_SUCCESS)
4675  {
4676  assert_release (false);
  /* elided logging-call head (orig. 4677) — argument tail follows */
4678  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid, page_vpid.pageid);
4679  goto error;
4680  }
4681  memcpy (&heap_hdr, copy_recdes.data, sizeof (heap_hdr));
4682 
  /* Purge the page from every statistics slot that still references it. */
4683  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
4684  {
4685  if (VPID_EQ (&heap_hdr.estimates.best[i].vpid, &page_vpid))
4686  {
4687  VPID_SET_NULL (&heap_hdr.estimates.best[i].vpid);
4688  heap_hdr.estimates.best[i].freespace = 0;
4689  heap_hdr.estimates.head = i;
4690  heap_hdr.estimates.num_high_best--;
4691  }
4692  if (VPID_EQ (&heap_hdr.estimates.second_best[i], &page_vpid))
4693  {
4694  VPID_SET_NULL (&heap_hdr.estimates.second_best[i]);
4695  }
4696  }
4697  if (VPID_EQ (&heap_hdr.estimates.last_vpid, &page_vpid))
4698  {
4699  VPID_COPY (&heap_hdr.estimates.last_vpid, &prev_vpid);
4700  }
4701  if (VPID_EQ (&prev_vpid, &header_vpid))
4702  {
4703  /* Update next link. */
4704  VPID_COPY (&heap_hdr.next_vpid, &next_vpid);
4705  }
4706  if (VPID_EQ (&heap_hdr.estimates.full_search_vpid, &page_vpid))
4707  {
  /* NOTE(review): orig. line 4708 elided — presumably resets
   * heap_hdr.estimates.full_search_vpid (e.g. to the header VPID) —
   * confirm against upstream. */
4709  }
4710 
4711  /* Update header and log changes. */
4712  update_recdes.data = (char *) &heap_hdr;
4713  update_recdes.length = sizeof (heap_hdr);
4714  if (spage_update (thread_p, header_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &update_recdes) != SP_SUCCESS)
4715  {
4716  assert_release (false);
  /* elided logging-call head (orig. 4717) — argument tail follows */
4718  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid, page_vpid.pageid);
4719  goto error;
4720  }
  /* NOTE(review): orig. line 4721 elided — presumably the head of a
   * log_append_undoredo_data (RVHF_STATS) call whose argument tail is the
   * next line — confirm against upstream. */
4722  sizeof (heap_hdr), sizeof (heap_hdr), copy_recdes.data, update_recdes.data);
4723  pgbuf_set_dirty (thread_p, header_watcher.pgptr, DONT_FREE);
4724 
4725  /* Update links in previous and next page. */
4726 
4727  if (prev_watcher.pgptr != NULL)
4728  {
4729  /* Next link in previous page. */
4730  assert (!VPID_EQ (&header_vpid, &prev_vpid));
4731  copy_recdes.area_size = sizeof (chain);
4732  if (spage_get_record (thread_p, prev_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &copy_recdes, COPY) !=
4733  S_SUCCESS)
4734  {
4735  assert_release (false);
  /* elided logging-call head (orig. 4736) — argument tail follows */
4737  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid,
4738  page_vpid.pageid);
4739  goto error;
4740  }
4741  memcpy (&chain, copy_recdes.data, copy_recdes.length);
4742  VPID_COPY (&chain.next_vpid, &next_vpid);
4743  update_recdes.data = (char *) &chain;
4744  update_recdes.length = sizeof (chain);
4745  if (spage_update (thread_p, prev_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &update_recdes) != SP_SUCCESS)
4746  {
4747  assert_release (false);
  /* elided logging-call head (orig. 4748) — argument tail follows */
4749  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid,
4750  page_vpid.pageid);
4751  goto error;
4752  }
  /* NOTE(review): orig. line 4753 elided — presumably the head of a
   * log_append_undoredo_data (RVHF_CHAIN) call whose argument tail is the
   * next line — confirm against upstream. */
4754  sizeof (chain), sizeof (chain), copy_recdes.data, update_recdes.data);
4755  pgbuf_set_dirty (thread_p, prev_watcher.pgptr, DONT_FREE);
4756  }
4757 
4758  if (next_watcher.pgptr != NULL)
4759  {
4760  /* Previous link in next page. */
4761  copy_recdes.area_size = sizeof (chain);
4762  if (spage_get_record (thread_p, next_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &copy_recdes, COPY) !=
4763  S_SUCCESS)
4764  {
4765  assert_release (false);
  /* elided logging-call head (orig. 4766) — argument tail follows */
4767  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid,
4768  page_vpid.pageid);
4769  goto error;
4770  }
4771  memcpy (&chain, copy_recdes.data, sizeof (chain));
4772  VPID_COPY (&chain.prev_vpid, &prev_vpid);
4773  update_recdes.data = (char *) &chain;
4774  update_recdes.length = sizeof (chain);
4775 
4776  if (spage_update (thread_p, next_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &update_recdes) != SP_SUCCESS)
4777  {
4778  assert_release (false);
  /* elided logging-call head (orig. 4779) — argument tail follows */
4780  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid,
4781  page_vpid.pageid);
4782  goto error;
4783  }
  /* NOTE(review): orig. line 4784 elided — presumably the head of a
   * log_append_undoredo_data (RVHF_CHAIN) call whose argument tail is the
   * next line — confirm against upstream. */
4785  sizeof (chain), sizeof (chain), copy_recdes.data, update_recdes.data);
4786  pgbuf_set_dirty (thread_p, next_watcher.pgptr, DONT_FREE);
4787  }
4788 
4789  /* Unfix current page. */
4790  pgbuf_ordered_unfix_and_init (thread_p, *page_ptr, &crt_watcher);
4791  /* Deallocate current page. */
4792  if (file_dealloc (thread_p, &hfid->vfid, &page_vpid, FILE_HEAP) != NO_ERROR)
4793  {
4794  ASSERT_ERROR ();
  /* elided logging-call head (orig. 4795) — argument tail follows */
4796  "Could not remove candidate empty heap page %d|%d.", page_vpid.volid, page_vpid.pageid);
4797  goto error;
4798  }
4799 
4800  /* Remove page from best space cached statistics. */
4801  (void) heap_stats_del_bestspace_by_vpid (thread_p, &page_vpid);
4802 
4803  /* Finished. */
4804  log_sysop_commit (thread_p);
4805  is_system_op_started = false;
4806 
4807  /* Unfix all pages. */
4808  if (next_watcher.pgptr != NULL)
4809  {
4810  pgbuf_ordered_unfix (thread_p, &next_watcher);
4811  }
4812  if (prev_watcher.pgptr != NULL)
4813  {
4814  pgbuf_ordered_unfix (thread_p, &prev_watcher);
4815  }
4816  pgbuf_ordered_unfix (thread_p, &header_watcher);
4817 
4818  /* Page removed successfully. */
4819  vacuum_er_log (VACUUM_ER_LOG_HEAP, "Successfully remove heap page %d|%d.", page_vpid.volid, page_vpid.pageid);
4820  return true;
4821 
4822 error:
4823  if (is_system_op_started)
4824  {
4825  log_sysop_abort (thread_p);
4826  }
4827  if (next_watcher.pgptr != NULL)
4828  {
4829  pgbuf_ordered_unfix (thread_p, &next_watcher);
4830  }
4831  if (prev_watcher.pgptr != NULL)
4832  {
4833  pgbuf_ordered_unfix (thread_p, &prev_watcher);
4834  }
4835  if (header_watcher.pgptr != NULL)
4836  {
4837  pgbuf_ordered_unfix (thread_p, &header_watcher);
4838  }
4839  if (*page_ptr != NULL)
4840  {
4841  if (*page_ptr != crt_watcher.pgptr)
4842  {
4843  /* jumped to here while fixing pages */
4844  assert (crt_watcher.page_was_unfixed);
4845  *page_ptr = crt_watcher.pgptr;
4846  }
4847  assert (crt_watcher.pgptr == *page_ptr);
4848  pgbuf_ordered_unfix_and_init (thread_p, *page_ptr, &crt_watcher);
4849  }
4850  else
4851  {
4852  assert (crt_watcher.pgptr == NULL);
4853  }
4854  /* Page was not removed. */
4855  return false;
4856 }
4857 
4858 /*
4859  * heap_vpid_next () - Find next page of heap
4860  * return: NO_ERROR
4861  * hfid(in): Object heap file identifier
4862  * pgptr(in): Current page pointer
4863  * next_vpid(in/out): Next volume-page identifier
4864  *
4865  * Note: Find the next page of heap file.
4866  */
4867 int
4868 heap_vpid_next (THREAD_ENTRY * thread_p, const HFID * hfid, PAGE_PTR pgptr, VPID * next_vpid)
4869 {
4870  HEAP_CHAIN *chain; /* Chain to next and prev page */
4871  HEAP_HDR_STATS *heap_hdr; /* Header of heap file */
4872  RECDES recdes; /* Record descriptor to page header */
4873  int ret = NO_ERROR;
4874 
4875  (void) pgbuf_check_page_ptype (thread_p, pgptr, PAGE_HEAP);
4876 
4877  /* Get either the heap header or chain record */
4878  if (spage_get_record (thread_p, pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
4879  {
4880  /* Unable to get header/chain record for the given page */
4881  VPID_SET_NULL (next_vpid);
4882  ret = ER_FAILED;
4883  }
4884  else
4885  {
4886  pgbuf_get_vpid (pgptr, next_vpid);
4887  /* Is this the header page ? */
4888  if (next_vpid->pageid == hfid->hpgid && next_vpid->volid == hfid->vfid.volid)
4889  {
4890  heap_hdr = (HEAP_HDR_STATS *) recdes.data;
4891  *next_vpid = heap_hdr->next_vpid;
4892  }
4893  else
4894  {
4895  chain = (HEAP_CHAIN *) recdes.data;
4896  *next_vpid = chain->next_vpid;
4897  }
4898  }
4899 
4900  return ret;
4901 }
4902 
4903 /*
4904  * heap_vpid_prev () - Find previous page of heap
4905  * return: NO_ERROR
4906  * hfid(in): Object heap file identifier
4907  * pgptr(in): Current page pointer
4908  * prev_vpid(in/out): Previous volume-page identifier
4909  *
4910  * Note: Find the previous page of heap file.
4911  */
4912 int
4913 heap_vpid_prev (THREAD_ENTRY * thread_p, const HFID * hfid, PAGE_PTR pgptr, VPID * prev_vpid)
4914 {
4915  HEAP_CHAIN *chain; /* Chain to next and prev page */
4916  RECDES recdes; /* Record descriptor to page header */
4917  int ret = NO_ERROR;
4918 
4919  (void) pgbuf_check_page_ptype (thread_p, pgptr, PAGE_HEAP);
4920 
4921  /* Get either the header or chain record */
4922  if (spage_get_record (thread_p, pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
4923  {
4924  /* Unable to get header/chain record for the given page */
4925  VPID_SET_NULL (prev_vpid);
4926  ret = ER_FAILED;
4927  }
4928  else
4929  {
4930  pgbuf_get_vpid (pgptr, prev_vpid);
4931  /* Is this the header page ? */
4932  if (prev_vpid->pageid == hfid->hpgid && prev_vpid->volid == hfid->vfid.volid)
4933  {
4934  VPID_SET_NULL (prev_vpid);
4935  }
4936  else
4937  {
4938  chain = (HEAP_CHAIN *) recdes.data;
4939  *prev_vpid = chain->prev_vpid;
4940  }
4941  }
4942 
4943  return ret;
4944 }
4945 
4946 /*
4947  * heap_manager_initialize () -
4948  * return: NO_ERROR
4949  *
4950  * Note: Initialization process of the heap file module. Find the
4951  * maximum size of an object that can be inserted in the heap.
4952  * Objects that overpass this size are stored in overflow.
4953  */
4954 int
  /* heap_manager_initialize: set up the heap module's caches and record-size
   * limits; returns NO_ERROR or the first failing initializer's error code.
   * NOTE(review): the line carrying the function name (doxygen 4955) is
   * missing from this extract; identity inferred from the header comment. */
4956 {
4957  int ret;
4958 
  /* Space reserved in slot 0 of every heap page for the header/chain record. */
4959 #define HEAP_MAX_FIRSTSLOTID_LENGTH (sizeof (HEAP_HDR_STATS))
4960 
  /* Cache the largest slotted record length and the per-slot overhead. */
4961  heap_Maxslotted_reclength = (spage_max_record_size () - HEAP_MAX_FIRSTSLOTID_LENGTH);
4962  heap_Slotted_overhead = spage_slot_size ();
4963 
4964  /* Initialize the class representation cache */
4965  ret = heap_chnguess_initialize ();
4966  if (ret != NO_ERROR)
4967  {
4968  return ret;
4969  }
4970 
4971  ret = heap_classrepr_initialize_cache ();
4972  if (ret != NO_ERROR)
4973  {
4974  return ret;
4975  }
4976 
4977  /* Initialize best space cache */
4978  ret = heap_stats_bestspace_initialize ();
4979  if (ret != NO_ERROR)
4980  {
4981  return ret;
4982  }
4983 
4984  /* Initialize class OID->HFID cache */
4985  ret = heap_initialize_hfid_table ();
4986 
4987  return ret;
4988 }
4989 
4990 /*
4991  * heap_manager_finalize () - Terminate the heap manager
4992  * return: NO_ERROR
4993  * Note: Deallocate any cached structure.
4994  */
4995 int
  /* heap_manager_finalize: tear down the heap module's caches; returns
   * NO_ERROR or the first failing finalizer's error code.
   * NOTE(review): the function-name line (doxygen 4996) is missing from
   * this extract; identity inferred from the header comment above. */
4997 {
4998  int ret;
4999 
5000  ret = heap_chnguess_finalize ();
5001  if (ret != NO_ERROR)
5002  {
5003  return ret;
5004  }
5005 
5006  ret = heap_classrepr_finalize_cache ();
5007  if (ret != NO_ERROR)
5008  {
5009  return ret;
5010  }
5011 
5012  ret = heap_stats_bestspace_finalize ();
5013  if (ret != NO_ERROR)
5014  {
5015  return ret;
5016  }
5017 
  /* NOTE(review): doxygen line 5018 is missing from this extract; the final
   * value of `ret` presumably comes from one more finalize call there
   * (likely the HFID table) — verify against upstream heap_file.c. */
5019 
5020  return ret;
5021 }
5022 
5023 /*
5024  * heap_create_internal () - Create a heap file
5025  * return: HFID * (hfid on success and NULL on failure)
5026  * hfid(in/out): Object heap file identifier.
5027  * All fields in the identifier are set, except the volume
5028  * identifier which should have already been set by the caller.
5029  * exp_npgs(in): Expected number of pages
5030  * class_oid(in): OID of the class for which the heap will be created.
5031  * reuse_oid(in): if true, the OIDs of deleted instances will be reused
5032  *
5033  * Note: Creates a heap file on the disk volume associated with
5034  * hfid->vfid->volid.
5035  *
5036  * A set of sectors is allocated to improve locality of the heap.
5037  * The number of sectors to allocate is estimated from the number
5038  * of expected pages. The maximum number of allocated sectors is
5039  * 25% of the total number of sectors in disk. When the number of
5040  * pages cannot be estimated, a negative value can be passed to
5041  * indicate so. In this case, no sectors are allocated. The
5042  * number of expected pages are not allocated at this moment,
5043  * they are allocated as needs arrives.
5044  */
5045 static int
5046 heap_create_internal (THREAD_ENTRY * thread_p, HFID * hfid, const OID * class_oid, const bool reuse_oid)
5047 {
  /* Creates (or reuses) a heap file and writes its header record, all under
   * a system operation that is attached to the outer transaction on success
   * and aborted on error.
   * NOTE(review): this extract is a doxygen dump; linked lines 5074,
   * 5176-5177, 5209-5211, 5213, 5224-5225, 5239, 5249 and 5265 are missing
   * below — verify any edit against upstream heap_file.c. */
5048  HEAP_HDR_STATS heap_hdr; /* Heap file header */
5049  VPID vpid; /* Volume and page identifiers */
5050  RECDES recdes; /* Record descriptor */
5051  LOG_DATA_ADDR addr_hdr; /* Address of logging data */
5052  INT16 slotid;
5053  int sp_success;
5054  int i;
5055  FILE_DESCRIPTORS des;
5056  const FILE_TYPE file_type = reuse_oid ? FILE_HEAP_REUSE_SLOTS : FILE_HEAP;
5057  PAGE_TYPE ptype = PAGE_HEAP;
5058  OID null_oid = OID_INITIALIZER;
5059 
5060  int error_code = NO_ERROR;
5061 
5062  addr_hdr.pgptr = NULL;
  /* Start the system operation (paired with attach_to_outer/abort below). */
5063  log_sysop_start (thread_p);
5064 
  /* A NULL class is replaced by a null OID so the header always stores one. */
5065  if (class_oid == NULL)
5066  {
5067  class_oid = &null_oid;
5068  }
5069  memset (hfid, 0, sizeof (HFID));
5070  HFID_SET_NULL (hfid);
5071 
5072  memset (&des, 0, sizeof (des));
5073 
  /* NOTE(review): the guarding condition (doxygen line 5074) is missing; the
   * block below tries to reuse an already marked-deleted heap file. */
5075  {
5076  /*
5077  * Try to reuse an already mark deleted heap file
5078  */
5079 
5080  error_code = file_tracker_reuse_heap (thread_p, class_oid, hfid);
5081  if (error_code != NO_ERROR)
5082  {
5083  ASSERT_ERROR ();
5084  goto error;
5085  }
5086 
5087  if (!HFID_IS_NULL (hfid))
5088  {
5089  /* reuse heap file */
5090  if (heap_reuse (thread_p, hfid, class_oid, reuse_oid) == NULL)
5091  {
5092  ASSERT_ERROR_AND_SET (error_code);
5093  goto error;
5094  }
5095  error_code = heap_insert_hfid_for_class_oid (thread_p, class_oid, hfid, file_type);
5096  if (error_code != NO_ERROR)
5097  {
5098  /* could not cache */
5099  assert_release (false);
5100  goto error;
5101  }
5102  /* reuse successful */
5103  goto end;
5104  }
5105  }
5106 
5107  /*
5108  * Create the unstructured file for the heap
5109  * Create the header for the heap file. The header is used to speed
5110  * up insertions of objects and to find some simple information about the
5111  * heap.
5112  * We do not initialize the page during the allocation since the file is
5113  * new, and the file is going to be removed in the event of a crash.
5114  */
5115 
5116  error_code = file_create_heap (thread_p, reuse_oid, class_oid, &hfid->vfid);
5117  if (error_code != NO_ERROR)
5118  {
5119  ASSERT_ERROR ();
5120  goto error;
5121  }
  /* The header page is allocated as the file's sticky first page. */
5122  error_code = file_alloc_sticky_first_page (thread_p, &hfid->vfid, file_init_page_type, &ptype, &vpid,
5123  &addr_hdr.pgptr);
5124  if (error_code != NO_ERROR)
5125  {
5126  ASSERT_ERROR ();
5127  goto error;
5128  }
5129  if (vpid.volid != hfid->vfid.volid)
5130  {
5131  /* we got problems */
5132  assert_release (false);
5133  error_code = ER_FAILED;
5134  goto error;
5135  }
5136  if (addr_hdr.pgptr == NULL)
5137  {
5138  /* something went wrong, destroy the file, and return */
5139  assert_release (false);
5140  error_code = ER_FAILED;
5141  goto error;
5142  }
5143 
5144  hfid->hpgid = vpid.pageid;
5145 
5146  /* update file descriptor to include class and hfid */
5147  des.heap.class_oid = *class_oid;
5148  des.heap.hfid = *hfid;
5149  error_code = file_descriptor_update (thread_p, &hfid->vfid, &des);
5150  if (error_code != NO_ERROR)
5151  {
5152  ASSERT_ERROR ();
5153  goto error;
5154  }
5155 
5156  error_code = heap_insert_hfid_for_class_oid (thread_p, class_oid, hfid, file_type);
5157  if (error_code != NO_ERROR)
5158  {
5159  /* Failed to cache HFID. */
5160  assert_release (false);
5161  goto error;
5162  }
5163 
  /* Drop any stale best-space statistics cached for this HFID. */
5164  (void) heap_stats_del_bestspace_by_hfid (thread_p, hfid);
5165 
5166  pgbuf_set_page_ptype (thread_p, addr_hdr.pgptr, PAGE_HEAP);
5167 
5168  /* Initialize header page */
5169  spage_initialize (thread_p, addr_hdr.pgptr, heap_get_spage_type (), HEAP_MAX_ALIGN, SAFEGUARD_RVSPACE);
5170 
5171  /* Now insert header */
5172  memset (&heap_hdr, 0, sizeof (heap_hdr));
5173  heap_hdr.class_oid = *class_oid;
5174  VFID_SET_NULL (&heap_hdr.ovf_vfid);
5175  VPID_SET_NULL (&heap_hdr.next_vpid);
  /* NOTE(review): doxygen lines 5176-5177 are missing here — confirm what
   * initialization they carried against upstream. */
5178 
  /* Seed the space estimates: the heap starts with only the header page. */
5179  heap_hdr.estimates.num_pages = 1;
5180  heap_hdr.estimates.num_recs = 0;
5181  heap_hdr.estimates.recs_sumlen = 0.0;
5182 
5183  heap_hdr.estimates.best[0].vpid.volid = hfid->vfid.volid;
5184  heap_hdr.estimates.best[0].vpid.pageid = hfid->hpgid;
5185  heap_hdr.estimates.best[0].freespace = spage_max_space_for_new_record (thread_p, addr_hdr.pgptr);
5186 
5187  heap_hdr.estimates.head = 1;
5188  for (i = heap_hdr.estimates.head; i < HEAP_NUM_BEST_SPACESTATS; i++)
5189  {
5190  VPID_SET_NULL (&heap_hdr.estimates.best[i].vpid);
5191  heap_hdr.estimates.best[i].freespace = 0;
5192  }
5193 
5194  heap_hdr.estimates.num_high_best = 1;
5195  heap_hdr.estimates.num_other_high_best = 0;
5196 
5197  heap_hdr.estimates.num_second_best = 0;
5198  heap_hdr.estimates.head_second_best = 0;
5199  heap_hdr.estimates.tail_second_best = 0;
5200  heap_hdr.estimates.num_substitutions = 0;
5201 
5202  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
5203  {
5204  VPID_SET_NULL (&heap_hdr.estimates.second_best[i]);
5205  }
5206 
5207  heap_hdr.estimates.last_vpid.volid = hfid->vfid.volid;
5208  heap_hdr.estimates.last_vpid.pageid = hfid->hpgid;
  /* NOTE(review): doxygen lines 5209-5211 and 5213 are missing here (likely
   * further estimate fields and the recdes length/area_size setup). */
5212 
5214  recdes.type = REC_HOME;
5215  recdes.data = (char *) &heap_hdr;
5216 
  /* The header record must land in slot HEAP_HEADER_AND_CHAIN_SLOTID. */
5217  sp_success = spage_insert (thread_p, addr_hdr.pgptr, &recdes, &slotid);
5218  if (sp_success != SP_SUCCESS || slotid != HEAP_HEADER_AND_CHAIN_SLOTID)
5219  {
5220  assert (false);
5221  /* something went wrong, destroy file and return error */
5222  if (sp_success != SP_SUCCESS)
5223  {
5226  }
5227 
5228  /* Free the page and release the lock */
5229  error_code = ER_HEAP_UNABLE_TO_CREATE_HEAP;
5230  goto error;
5231  }
5232  else
5233  {
5234  /*
5235  * Don't need to log before image (undo) since file and pages of the heap
5236  * are deallocated during undo (abort).
5237  */
5238  addr_hdr.vfid = &hfid->vfid;
  /* NOTE(review): doxygen line 5239 (likely addr_hdr.offset assignment) is
   * missing here. */
5240  log_append_redo_data (thread_p, RVHF_CREATE_HEADER, &addr_hdr, sizeof (heap_hdr), &heap_hdr);
5241  pgbuf_set_dirty (thread_p, addr_hdr.pgptr, FREE);
5242  addr_hdr.pgptr = NULL;
5243  }
5244 
5245 end:
5246  assert (error_code == NO_ERROR);
5247 
  /* Success: make the system operation part of the outer transaction and
   * flush the log pages to disk. */
5248  log_sysop_attach_to_outer (thread_p);
5250 
5251  LOG_CS_ENTER (thread_p);
5252  logpb_flush_pages_direct (thread_p);
5253  LOG_CS_EXIT (thread_p);
5254 
5255  return NO_ERROR;
5256 
5257 error:
5258  assert (error_code != NO_ERROR);
5259 
5260  if (addr_hdr.pgptr != NULL)
5261  {
5262  pgbuf_unfix_and_init (thread_p, addr_hdr.pgptr);
5263  }
5264 
  /* NOTE(review): doxygen line 5265 is missing here (presumably resetting
   * hfid->vfid); only hpgid is visibly reset below. */
5266  hfid->hpgid = NULL_PAGEID;
5267 
  /* Undo everything done under the system operation. */
5268  log_sysop_abort (thread_p);
5269  return error_code;
5270 }
5271 
5272 /*
5273  * heap_delete_all_page_records () -
5274  * return: false if nothing is deleted, otherwise true
5275  * vpid(in): the vpid of the page
5276  * pgptr(in): PAGE_PTR to the page
5277  */
5278 static bool
5279 heap_delete_all_page_records (THREAD_ENTRY * thread_p, const VPID * vpid, PAGE_PTR pgptr)
5280 {
  /* Deletes every deletable record slot on the given heap page; returns true
   * iff at least one record was deleted. */
5281  bool something_deleted = false;
5282  OID oid;
5283  RECDES recdes;
5284 
5285  assert (pgptr != NULL);
5286  assert (vpid != NULL);
5287 
5288  oid.volid = vpid->volid;
5289  oid.pageid = vpid->pageid;
5290  oid.slotid = NULL_SLOTID;
5291 
  /* Walk every record slot on the page; spage_next_record advances slotid. */
5292  while (true)
5293  {
5294  if (spage_next_record (pgptr, &oid.slotid, &recdes, PEEK) != S_SUCCESS)
5295  {
5296  break;
5297  }
  /* NOTE(review): the condition (doxygen line 5298) guarding this `continue`
   * is missing from this extract — presumably it skips the reserved
   * header/chain slot; confirm against upstream heap_file.c. */
5299  {
5300  continue;
5301  }
5302  (void) spage_delete (thread_p, pgptr, oid.slotid);
5303  something_deleted = true;
5304  }
5305 
5306  return something_deleted;
5307 }
5308 
5309 /*
5310  * heap_reinitialize_page () -
5311  * return: NO_ERROR if succeed, otherwise error code
5312  * pgptr(in): PAGE_PTR to the page
5313  * is_header_page(in): true if the page is the header page
5314  */
5315 static int
5316 heap_reinitialize_page (THREAD_ENTRY * thread_p, PAGE_PTR pgptr, const bool is_header_page)
5317 {
  /* Re-initializes a heap page in place: saves a copy of its header/chain
   * record, wipes the slotted page, and re-inserts the saved record into the
   * reserved first slot.  Returns NO_ERROR or ER_GENERIC_ERROR. */
5318  HEAP_CHAIN tmp_chain;
5319  HEAP_HDR_STATS tmp_hdr_stats;
5320  PGSLOTID slotid = NULL_SLOTID;
5321  RECDES recdes;
5322  int error_code = NO_ERROR;
5323 
5324  if (spage_get_record (thread_p, pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
5325  {
  /* NOTE(review): doxygen line 5326 is missing here (likely an er_set call). */
5327  error_code = ER_GENERIC_ERROR;
5328  goto error_exit;
5329  }
5330 
  /* Copy the peeked record into a local: spage_initialize below will wipe
   * the page the peek points into. */
5331  if (is_header_page)
5332  {
5333  assert (recdes.length == sizeof (HEAP_HDR_STATS));
5334  tmp_hdr_stats = *(HEAP_HDR_STATS *) recdes.data;
5335  recdes.data = (char *) &tmp_hdr_stats;
5336  recdes.area_size = recdes.length = sizeof (tmp_hdr_stats);
5337  recdes.type = REC_HOME;
5338  }
5339  else
5340  {
5341  assert (recdes.length == sizeof (HEAP_CHAIN));
5342  tmp_chain = *(HEAP_CHAIN *) recdes.data;
5343  recdes.data = (char *) &tmp_chain;
5344  recdes.area_size = recdes.length = sizeof (tmp_chain);
5345  recdes.type = REC_HOME;
5346  }
5347 
5348  (void) pgbuf_set_page_ptype (thread_p, pgptr, PAGE_HEAP);
5349 
5350  /* Initialize header page */
5351  spage_initialize (thread_p, pgptr, heap_get_spage_type (), HEAP_MAX_ALIGN, SAFEGUARD_RVSPACE);
5352 
  /* Re-insert the saved record; it must land back in the reserved slot. */
5353  if (spage_insert (thread_p, pgptr, &recdes, &slotid) != SP_SUCCESS || slotid != HEAP_HEADER_AND_CHAIN_SLOTID)
5354  {
  /* NOTE(review): doxygen line 5355 is missing here (likely an er_set call). */
5356  error_code = ER_GENERIC_ERROR;
5357  goto error_exit;
5358  }
5359  else
5360  {
5361  /* All is well and the page is now empty. */
5362  }
5363 
5364  return error_code;
5365 
5366 error_exit:
5367  if (error_code == NO_ERROR)
5368  {
5369  error_code = ER_GENERIC_ERROR;
5370  }
5371  return error_code;
5372 }
5373 
5374 /*
5375  * heap_reuse () - Reuse a heap
5376  * return: HFID * (hfid on success and NULL on failure)
5377  * hfid(in): Object heap file identifier.
5378  * class_oid(in): OID of the class for which the heap will be created.
 * reuse_oid(in): if true, wipe pages entirely so deleted OIDs may be reused
5379  *
5380  * Note: Clean the given heap file so that it can be reused.
5381  * Note: The heap file must have been permanently marked as deleted.
5382  */
5383 static const HFID *
5384 heap_reuse (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * class_oid, const bool reuse_oid)
5385 {
  /* Cleans every page of a marked-deleted heap file so it can serve a new
   * class; returns hfid on success, NULL on failure.
   * NOTE(review): doxygen dump — linked lines 5412, 5439, 5496, 5507, 5532,
   * 5544-5545 and 5558 are missing below; verify edits against upstream. */
5386  VPID vpid; /* Volume and page identifiers */
5387  PAGE_PTR hdr_pgptr = NULL; /* Page pointer to header page */
5388  PAGE_PTR pgptr = NULL; /* Page pointer */
5389  LOG_DATA_ADDR addr; /* Address of logging data */
5390  HEAP_HDR_STATS *heap_hdr = NULL; /* Header of heap structure */
5391  HEAP_CHAIN *chain; /* Chain to next and prev page */
5392  RECDES recdes;
5393  VPID last_vpid;
5394  int is_header_page;
5395  int npages = 0;
5396  int i;
5397  bool need_update;
5398 
5399  assert (class_oid != NULL);
5400  assert (!OID_ISNULL (class_oid));
5401 
5402  VPID_SET_NULL (&last_vpid);
5403  addr.vfid = &hfid->vfid;
5404 
5405  /*
5406  * Read the header page.
5407  * We lock the header page in exclusive mode.
5408  */
5409 
5410  vpid.volid = hfid->vfid.volid;
5411  vpid.pageid = hfid->hpgid;
  /* NOTE(review): the fix of hdr_pgptr (doxygen line 5412) is missing here. */
5413  if (hdr_pgptr == NULL)
5414  {
5415  return NULL;
5416  }
5417 
5418  (void) pgbuf_check_page_ptype (thread_p, hdr_pgptr, PAGE_HEAP);
5419 
5420  /*
5421  * Start scanning every page of the heap and removing the objects.
5422  * Note that, for normal heap files, the slot is not removed since we do not
5423  * know if the objects are pointed by some other objects in the database.
5424  * For reusable OID heap files we are certain there can be no references to
5425  * the objects so we can simply initialize the slotted page.
5426  */
5427  /*
5428  * Note Because the objects of reusable OID heaps are not referenced,
5429  * reusing such heaps provides no actual benefit. We might consider
5430  * giving up the reuse heap mechanism for reusable OID heaps in the
5431  * future.
5432  */
5433 
5434  while (!(VPID_ISNULL (&vpid)))
5435  {
5436  /*
5437  * Fetch the page
5438  */
  /* NOTE(review): the fix of pgptr (doxygen line 5439) is missing here. */
5440  if (pgptr == NULL)
5441  {
5442  goto error;
5443  }
5444 
5445  (void) pgbuf_check_page_ptype (thread_p, pgptr, PAGE_HEAP);
5446 
5447  is_header_page = (hdr_pgptr == pgptr) ? 1 : 0;
5448 
5449  /*
5450  * Remove all the objects in this page
5451  */
5452  if (!reuse_oid)
5453  {
  /* Normal heap: delete the records but keep the slots occupied so old
   * OIDs are never handed out again. */
5454  (void) heap_delete_all_page_records (thread_p, &vpid, pgptr);
5455 
5456  addr.pgptr = pgptr;
5457  addr.offset = is_header_page;
5458  log_append_redo_data (thread_p, RVHF_REUSE_PAGE, &addr, sizeof (*class_oid), class_oid);
5459  }
5460  else
5461  {
  /* Reusable-OID heap: wipe the whole slotted page (nothing can point at
   * these objects), keeping only the header/chain record. */
5462  if (spage_number_of_slots (pgptr) > 1)
5463  {
5464  if (heap_reinitialize_page (thread_p, pgptr, is_header_page) != NO_ERROR)
5465  {
5466  goto error;
5467  }
5468  }
5469 
5470  addr.pgptr = pgptr;
5471  addr.offset = is_header_page;
5472  log_append_redo_data (thread_p, RVHF_REUSE_PAGE_REUSE_OID, &addr, sizeof (*class_oid), class_oid);
5473  }
5474 
5475  if (spage_get_record (thread_p, pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
5476  {
5477  goto error;
5478  }
5479  if (recdes.data == NULL)
5480  {
5481  goto error;
5482  }
5483 
5484  /* save new class oid in the page. it dirties the page. */
5485  if (is_header_page)
5486  {
5487  heap_hdr = (HEAP_HDR_STATS *) recdes.data;
5488  COPY_OID (&(heap_hdr->class_oid), class_oid);
5489  }
5490  else
5491  {
5492  chain = (HEAP_CHAIN *) recdes.data;
5493  COPY_OID (&(chain->class_oid), class_oid);
5494  chain->max_mvccid = MVCCID_NULL;
5495  chain->flags = 0;
  /* NOTE(review): doxygen line 5496 is missing here. */
5497  }
5498 
  /* Record the first HEAP_NUM_BEST_SPACESTATS pages as best-space hints.
   * heap_hdr is valid: the header page is always the first one visited. */
5499  if (npages < HEAP_NUM_BEST_SPACESTATS)
5500  {
5501  heap_hdr->estimates.best[npages].vpid = vpid;
5502  heap_hdr->estimates.best[npages].freespace =
5503  spage_get_free_space_without_saving (thread_p, pgptr, &need_update);
5504 
5505  }
5506 
  /* NOTE(review): the condition (doxygen line 5507) guarding this best-space
   * cache insertion is missing from this extract. */
5508  {
5509  (void) heap_stats_add_bestspace (thread_p, hfid, &vpid, DB_PAGESIZE);
5510  }
5511 
5512  npages++;
5513  last_vpid = vpid;
5514 
5515  /*
5516  * Find next page to scan and free the current page
5517  */
5518  if (heap_vpid_next (thread_p, hfid, pgptr, &vpid) != NO_ERROR)
5519  {
5520  goto error;
5521  }
5522 
5523  pgbuf_set_dirty (thread_p, pgptr, FREE);
5524  pgptr = NULL;
5525  }
5526 
5527  /*
5528  * Reset the statistics. Set statistics for insertion back to first page
5529  * and reset unfill space according to new parameters
5530  */
5531  VFID_SET_NULL (&heap_hdr->ovf_vfid);
  /* NOTE(review): doxygen line 5532 is missing here. */
5533  heap_hdr->estimates.num_pages = npages;
5534  heap_hdr->estimates.num_recs = 0;
5535  heap_hdr->estimates.recs_sumlen = 0.0;
5536 
5537  if (npages < HEAP_NUM_BEST_SPACESTATS)
5538  {
5539  heap_hdr->estimates.num_high_best = npages;
5540  heap_hdr->estimates.num_other_high_best = 0;
5541  }
5542  else
5543  {
  /* NOTE(review): doxygen lines 5544-5545 are missing here (presumably the
   * saturated num_high_best/num_other_high_best assignments). */
5546  }
5547 
5548  heap_hdr->estimates.head = 0;
5549  for (i = npages; i < HEAP_NUM_BEST_SPACESTATS; i++)
5550  {
5551  VPID_SET_NULL (&heap_hdr->estimates.best[i].vpid);
5552  heap_hdr->estimates.best[i].freespace = 0;
5553  }
5554 
5555  heap_hdr->estimates.last_vpid = last_vpid;
5556 
5557  addr.pgptr = hdr_pgptr;
  /* NOTE(review): doxygen line 5558 is missing here (likely addr.offset). */
5559  log_append_redo_data (thread_p, RVHF_STATS, &addr, sizeof (*heap_hdr), heap_hdr);
5560  pgbuf_set_dirty (thread_p, hdr_pgptr, FREE);
5561  hdr_pgptr = NULL;
5562 
5563  return hfid;
5564 
5565 error:
5566  if (pgptr != NULL)
5567  {
5568  pgbuf_unfix_and_init (thread_p, pgptr);
5569  }
5570  if (hdr_pgptr != NULL)
5571  {
5572  pgbuf_unfix_and_init (thread_p, hdr_pgptr);
5573  }
5574 
5575  return NULL;
5576 }
5577 
5578 #if defined(CUBRID_DEBUG)
5579 /*
5580  * heap_hfid_isvalid () -
5581  * return:
5582  * hfid(in):
5583  */
5584 static DISK_ISVALID
5585 heap_hfid_isvalid (HFID * hfid)
5586 {
  /* Debug-only (CUBRID_DEBUG) sanity check of a heap file identifier;
   * returns DISK_INVALID for a NULL/unset hfid, otherwise the result of the
   * on-disk validation below. */
5587  DISK_ISVALID valid_pg = DISK_VALID;
5588 
5589  if (hfid == NULL || HFID_IS_NULL (hfid))
5590  {
5591  return DISK_INVALID;
5592  }
5593 
  /* NOTE(review): doxygen line 5594 is missing here (presumably the first
   * validation call assigning valid_pg). */
5595  if (valid_pg == DISK_VALID)
5596  {
  /* NOTE(review): doxygen line 5597 is missing here (a further check). */
5598  }
5599 
5600  return valid_pg;
5601 }
5602 
5603 /*
5604  * heap_scanrange_isvalid () -
5605  * return:
5606  * scan_range(in):
5607  */
5608 static DISK_ISVALID
5609 heap_scanrange_isvalid (HEAP_SCANRANGE * scan_range)
5610 {
  /* Debug-only (CUBRID_DEBUG) sanity check of a scan range: validates the
   * HFID stored in its scan cache and logs when it looks uninitialized. */
5611  DISK_ISVALID valid_pg = DISK_INVALID;
5612 
5613  if (scan_range != NULL)
5614  {
5615  valid_pg = heap_hfid_isvalid (&scan_range->scan_cache.hfid);
5616  }
5617 
5618  if (valid_pg != DISK_VALID)
5619  {
5620  if (valid_pg != DISK_ERROR)
5621  {
5622  er_log_debug (ARG_FILE_LINE, " ** SYSTEM ERROR scanrange has not been initialized");
  /* NOTE(review): doxygen line 5623 is missing here. */
5624  }
5625  }
5626 
5627  return valid_pg;
5628 }
5629 #endif /* CUBRID_DEBUG */
5630 
5631 /*
5632  * xheap_create () - Create a heap file
5633  * return: int
5634  * hfid(in/out): Object heap file identifier.
5635  * All fields in the identifier are set, except the volume
5636  * identifier which should have already been set by the caller.
5637  * class_oid(in): OID of the class for which the heap will be created.
5638  * reuse_oid(in): if true, the OIDs of deleted instances will be reused
5639  *
5640  * Note: Creates an object heap file on the disk volume associated with
5641  * hfid->vfid->volid.
5642  */
5643 int
5644 xheap_create (THREAD_ENTRY * thread_p, HFID * hfid, const OID * class_oid, bool reuse_oid)
5645 {
5646  return heap_create_internal (thread_p, hfid, class_oid, reuse_oid);
5647 }
5648 
5649 /*
5650  * xheap_destroy () - Destroy a heap file
5651  * return: int
5652  * hfid(in): Object heap file identifier.
5653  * class_oid(in):
5654  *
5655  * Note: Destroy the heap file associated with the given heap identifier.
5656  */
5657 int
5658 xheap_destroy (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * class_oid)
5659 {
  /* Schedules destruction (via postpone) of the heap file and its overflow
   * file, and drops cached best-space stats; always returns NO_ERROR. */
5660  VFID vfid;
5661  LOG_DATA_ADDR addr;
5662 
  /* NOTE(review): doxygen line 5663 is missing from this extract; `addr` is
   * initialized below but not visibly used — its use may be on that line or
   * elsewhere upstream; confirm before removing it. */
5664 
5665  addr.vfid = NULL;
5666  addr.pgptr = NULL;
5667  addr.offset = -1;
  /* Destroy the overflow file too, if this heap has one. */
5668  if (heap_ovf_find_vfid (thread_p, hfid, &vfid, false, PGBUF_UNCONDITIONAL_LATCH) != NULL)
5669  {
5670  file_postpone_destroy (thread_p, &vfid);
5671  }
5672 
5673  file_postpone_destroy (thread_p, &hfid->vfid);
5674 
5675  (void) heap_stats_del_bestspace_by_hfid (thread_p, hfid);
5676 
5677  return NO_ERROR;
5678 }
5679 
5680 /*
5681  * xheap_destroy_newly_created () - Destroy heap if it is a newly created heap
5682  * return: NO_ERROR
5683  * hfid(in): Object heap file identifier.
5684  * class_oid(in): class OID
5685  *
5686  * Note: Destroy the heap file associated with the given heap
5687  * identifier if it is a newly created heap file.
5688  */
5689 int
5690 xheap_destroy_newly_created (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * class_oid)
5691 {
  /* Destroys a newly created heap: reusable-slot heaps are destroyed outright
   * (xheap_destroy); normal heaps are only marked deleted via a postpone log
   * record so the file can later be reused by heap_create_internal. */
5692  VFID vfid;
5693  FILE_TYPE file_type;
5694  int ret;
  /* NOTE(review): doxygen line 5695 is missing from this extract; `addr`
   * used in log_append_postpone below is presumably declared there. */
5696 
5697  ret = file_get_type (thread_p, &hfid->vfid, &file_type);
5698  if (ret != NO_ERROR)
5699  {
5700  ASSERT_ERROR ();
5701  return ret;
5702  }
5703  if (file_type == FILE_HEAP_REUSE_SLOTS)
5704  {
5705  ret = xheap_destroy (thread_p, hfid, class_oid);
5706  return ret;
5707  }
5708 
  /* NOTE(review): doxygen line 5709 is missing here (likely `addr` setup). */
5710 
  /* The overflow file, if any, is fully destroyed even when the heap itself
   * is only marked deleted. */
5711  if (heap_ovf_find_vfid (thread_p, hfid, &vfid, false, PGBUF_UNCONDITIONAL_LATCH) != NULL)
5712  {
5713  file_postpone_destroy (thread_p, &vfid);
5714  }
5715 
5716  log_append_postpone (thread_p, RVHF_MARK_DELETED, &addr, sizeof (hfid->vfid), &hfid->vfid);
5717 
5718  (void) heap_stats_del_bestspace_by_hfid (thread_p, hfid);
5719 
5720  return ret;
5721 }
5722 
5723 /*
5724  * heap_rv_mark_deleted_on_undo () - mark heap file as deleted on undo
5725  *
5726  * return : error code
5727  * thread_p (in) : thread entry
5728  * rcv (in) : recovery data
5729  */
5730 int
  /* heap_rv_mark_deleted_on_undo: recovery callback that marks a heap file
   * deleted in the tracker with is_undo = true.
   * NOTE(review): the function-name/parameter line (doxygen 5731) is missing
   * from this extract; identity inferred from the header comment. */
5732 {
5733  int error_code = file_rv_tracker_mark_heap_deleted (thread_p, rcv, true);
5734  if (error_code != NO_ERROR)
5735  {
5736  assert_release (false);
5737  }
5738  return error_code;
5739 }
5740 
5741 /*
5742  * heap_rv_mark_deleted_on_postpone () - mark heap file as deleted on postpone
5743  *
5744  * return : error code
5745  * thread_p (in) : thread entry
5746  * rcv (in) : recovery data
5747  */
5748 int
  /* heap_rv_mark_deleted_on_postpone: recovery callback that marks a heap
   * file deleted in the tracker with is_undo = false (postpone phase).
   * NOTE(review): the function-name/parameter line (doxygen 5749) is missing
   * from this extract; identity inferred from the header comment. */
5750 {
5751  int error_code = file_rv_tracker_mark_heap_deleted (thread_p, rcv, false);
5752  if (error_code != NO_ERROR)
5753  {
5754  assert_release (false);
5755  }
5756  return error_code;
5757 }
5758 
5759 /*
5760  * heap_assign_address () - Assign a new location
5761  * return: NO_ERROR / ER_FAILED
5762  * hfid(in): Object heap file identifier
5763  * class_oid(in): class identifier
5764  * oid(out): Object identifier.
5765  * expected_length(in): Expected length
5766  *
5767  * Note: Assign an OID to an object and reserve the expected length for
5768  * the object. The following rules are observed for the expected length.
5769  * 1. A negative value is passed when only an approximation of
5770  * the length of the object is known. This approximation is
5771  * taken as the minimal length by this module. This case is
5772  * used when the transformer module (tfcl) skips some fileds
5773  * while walking through the object to find out its length.
5774  * a) Heap manager find the average length of objects in the
5775  * heap.
5776  * If the average length > abs(expected_length)
5777  * The average length is used instead
5778  * 2. A zero value, heap manager uses the average length of the
5779  * objects in the heap.
5780  * 3. If length is larger than one page, the size of an OID is
5781  * used since the object is going to be stored in overflow
5782  * 4. If length is > 0 and smaller than OID_SIZE
5783  * OID_SIZE is used as the expected length.
5784  */
5785 int
5786 heap_assign_address (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * oid, int expected_length)
5787 {
  /* Reserves an OID in the heap by inserting a data-less record whose length
   * reservation follows the rules in the header comment; the assigned OID is
   * copied to `oid`.  Returns NO_ERROR or the insertion error. */
5788  HEAP_OPERATION_CONTEXT insert_context;
5789  RECDES recdes;
5790  int rc;
5791 
  /* Non-positive expected_length: take the larger of the heap's average
   * object length and |expected_length|. */
5792  if (expected_length <= 0)
5793  {
5794  recdes.length = heap_estimate_avg_length (thread_p, hfid);
5795  if (recdes.length > (-expected_length))
5796  {
5797  expected_length = recdes.length;
5798  }
5799  else
5800  {
5801  expected_length = -expected_length;
5802  }
5803  }
5804 
5805  /*
5806  * Use the expected length only when it is larger than the size of an OID
5807  * and it is smaller than the maximum size of an object that can be stored
5808  * in the primary area (no in overflow). In any other case, use the the size
5809  * of an OID as the length.
5810  */
5811 
5812  recdes.length =
5813  ((expected_length > SSIZEOF (OID) && !heap_is_big_length (expected_length)) ? expected_length : SSIZEOF (OID));
5814 
  /* No payload: only the address/space is reserved. */
5815  recdes.data = NULL;
  /* NOTE(review): doxygen line 5816 is missing here (presumably setting
   * recdes.type, likely REC_ASSIGN_ADDRESS — confirm against upstream). */
5817 
5818  /* create context */
5819  heap_create_insert_context (&insert_context, (HFID *) hfid, class_oid, &recdes, NULL);
5820 
5821  /* insert */
5822  rc = heap_insert_logical (thread_p, &insert_context);
5823  if (rc != NO_ERROR)
5824  {
5825  return rc;
5826  }
5827 
5828  /* get result and exit */
5829  COPY_OID (oid, &insert_context.res_oid);
5830  return NO_ERROR;
5831 }
5832 
5833 /*
5834  * heap_flush () - Flush all dirty pages where the object resides
5835  * return:
5836  * oid(in): Object identifier
5837  *
5838  * Note: Flush all dirty pages where the object resides.
5839  */
5840 void
5841 heap_flush (THREAD_ENTRY * thread_p, const OID * oid)
5842 {
  /* Flushes (WAL-safe) the dirty page(s) holding the given object, following
   * relocation and overflow indirections.  Best effort: errors are swallowed
   * and the function simply returns.
   * NOTE(review): doxygen dump — linked lines 5865, 5910 and 5943 are
   * missing below. */
5843  VPID vpid; /* Volume and page identifiers */
5844  PAGE_PTR pgptr = NULL; /* Page pointer */
5845  INT16 type;
5846  OID forward_oid;
5847  RECDES forward_recdes;
5848  int ret = NO_ERROR;
5849 
5850  if (HEAP_ISVALID_OID (thread_p, oid) != DISK_VALID)
5851  {
5852  return;
5853  }
5854 
5855  /*
5856  * Lock and fetch the page where the object is stored
5857  */
5858  vpid.volid = oid->volid;
5859  vpid.pageid = oid->pageid;
5860  pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, S_LOCK, NULL, NULL);
5861  if (pgptr == NULL)
5862  {
5863  if (er_errid () == ER_PB_BAD_PAGEID)
5864  {
  /* NOTE(review): doxygen line 5865 is missing here (likely an er_set for
   * an unknown object). */
5866  }
5867  /* something went wrong, return */
5868  return;
5869  }
5870 
5871  (void) pgbuf_check_page_ptype (thread_p, pgptr, PAGE_HEAP);
5872 
5873  type = spage_get_record_type (pgptr, oid->slotid);
5874  if (type == REC_UNKNOWN)
5875  {
5876  goto end;
5877  }
5878 
5879  /* If this page is dirty flush it */
5880  (void) pgbuf_flush_with_wal (thread_p, pgptr);
5881 
5882  switch (type)
5883  {
5884  case REC_RELOCATION:
5885  /*
5886  * The object stored on the page is a relocation record. The relocation
5887  * record is used as a map to find the actual location of the content of
5888  * the object.
5889  */
5890 
5891  forward_recdes.data = (char *) &forward_oid;
5892  forward_recdes.area_size = OR_OID_SIZE;
5893 
5894  if (spage_get_record (thread_p, pgptr, oid->slotid, &forward_recdes, COPY) != S_SUCCESS)
5895  {
5896  /* Unable to get relocation record of the object */
5897  goto end;
5898  }
5899  pgbuf_unfix_and_init (thread_p, pgptr);
5900 
5901  /* Fetch the new home page */
5902  vpid.volid = forward_oid.volid;
5903  vpid.pageid = forward_oid.pageid;
5904 
5905  pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, S_LOCK, NULL, NULL);
5906  if (pgptr == NULL)
5907  {
5908  if (er_errid () == ER_PB_BAD_PAGEID)
5909  {
  /* NOTE(review): doxygen line 5910 (start of an er_set call whose argument
   * list continues on the next line) is missing here. */
5911  forward_oid.pageid, forward_oid.slotid);
5912  }
5913 
5914  return;
5915  }
5916 
5917  (void) pgbuf_check_page_ptype (thread_p, pgptr, PAGE_HEAP);
5918 
  /* Flush the forwarded home page as well. */
5919  (void) pgbuf_flush_with_wal (thread_p, pgptr);
5920  break;
5921 
5922  case REC_BIGONE:
5923  /*
5924  * The object stored in the heap page is a relocation_overflow record,
5925  * get the overflow address of the object
5926  */
5927  forward_recdes.data = (char *) &forward_oid;
5928  forward_recdes.area_size = OR_OID_SIZE;
5929 
5930  if (spage_get_record (thread_p, pgptr, oid->slotid, &forward_recdes, COPY) != S_SUCCESS)
5931  {
5932  /* Unable to peek overflow address of multipage object */
5933  goto end;
5934  }
5935  pgbuf_unfix_and_init (thread_p, pgptr);
  /* Flush all overflow pages of the multipage object. */
5936  ret = heap_ovf_flush (thread_p, &forward_oid);
5937  break;
5938 
  /* Home/assigned records live on the page already flushed above. */
5939  case REC_ASSIGN_ADDRESS:
5940  case REC_HOME:
5941  case REC_NEWHOME:
5942  case REC_MARKDELETED:
  /* NOTE(review): doxygen line 5943 is missing here (likely one more case
   * label, e.g. REC_DELETED_WILL_REUSE — confirm against upstream). */
5944  default:
5945  break;
5946  }
5947 
5948 end:
5949  if (pgptr != NULL)
5950  {
5951  pgbuf_unfix_and_init (thread_p, pgptr);
5952  }
5953 }
5954 
5955 /*
5956  * xheap_reclaim_addresses () - Reclaim addresses/OIDs and delete empty pages
5957  * return: NO_ERROR
5958  * hfid(in): Heap file identifier
5959  *
5960  * Note: Reclaim the addresses (OIDs) of deleted objects of the given heap and
5961  * delete all the heap pages that are left empty.
5962  *
5963  * This function can be called:
5964  * a: When there are no more references to deleted objects of the given
5965  * heap. This happens during offline compactdb execution after all the
5966  * classes in the schema have been processed by the process_class ()
5967  * function that sets the references to deleted objects to NULL.
5968  * b: When we are sure there can be no references to any object of the
5969  * associated class. This happens during online compactdb execution when
5970  * all the classes in the schema are checked to see if can they point to
5971  * instances of the current class by checking all their atributes'
5972  * domains.
5973  *
5974  * If references to deleted objects were nulled by the current
5975  * transaction some recovery problems may happen in the case of a crash
5976  * since the reclaiming of the addresses is done without logging (or
5977  * very little one) and thus it cannot be fully undone. Some logging is
5978  * done to make sure that media recovery will not be impacted. This was
5979  * done to avoid a lot of unneeded logging. Thus, if the caller was
5980  * setting references to deleted objects to NULL, the caller must commit
5981  * his transaction before this function is invoked.
5982  *
5983  * This function must be run:
5984  * a: offline, that is, when the user is the only one using the database
5985  * system.
5986  * b: online while holding an exclusive lock on the associated class.
5987  */
5988 int
/* NOTE(review): the signature line (original line 5989) is missing from this
 * extraction; per the header comment this is xheap_reclaim_addresses
 * (THREAD_ENTRY * thread_p, const HFID * hfid) -- confirm against the
 * repository source before relying on the parameter list. */
5990 {
5991  VPID vpid;
5992  VPID prv_vpid;
5993  int best, i;
5994  HEAP_HDR_STATS initial_heap_hdr;
5995  HEAP_HDR_STATS heap_hdr;
5996  RECDES hdr_recdes;
5997  LOG_DATA_ADDR addr;
5998  int ret = NO_ERROR;
5999  int free_space;
6000  int npages, nrecords, rec_length;
6001  bool need_update;
6002  PGBUF_WATCHER hdr_page_watcher;
6003  PGBUF_WATCHER curr_page_watcher;
6004 
6005  PGBUF_INIT_WATCHER (&hdr_page_watcher, PGBUF_ORDERED_HEAP_HDR, hfid);
6006  PGBUF_INIT_WATCHER (&curr_page_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
6007 
6008  addr.vfid = &hfid->vfid;
6009  addr.pgptr = NULL;
6010  addr.offset = 0;
6011 
     /* Fix the heap header page with a write latch; its statistics record is
      * copied out, rebuilt from scratch during the page walk below, and
      * written back (with undo/redo logging) at the end. */
6012  vpid.volid = hfid->vfid.volid;
6013  vpid.pageid = hfid->hpgid;
6014 
6015  ret = pgbuf_ordered_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &hdr_page_watcher);
6016  if (ret != NO_ERROR)
6017  {
6018  goto exit_on_error;
6019  }
6020 
6021  (void) pgbuf_check_page_ptype (thread_p, hdr_page_watcher.pgptr, PAGE_HEAP);
6022 
6023  hdr_recdes.data = (char *) &heap_hdr;
6024  hdr_recdes.area_size = sizeof (heap_hdr);
6025 
6026  if (spage_get_record (thread_p, hdr_page_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, COPY) != S_SUCCESS)
6027  {
6028  goto exit_on_error;
6029  }
6030  prv_vpid = heap_hdr.estimates.last_vpid;
6031 
6032  /* Copy the header to memory.. so we can log the changes */
6033  memcpy (&initial_heap_hdr, hdr_recdes.data, sizeof (initial_heap_hdr));
6034 
6035  /*
6036  * Initialize best estimates
6037  */
6038  heap_hdr.estimates.num_pages = 0;
6039  heap_hdr.estimates.num_recs = 0;
6040  heap_hdr.estimates.recs_sumlen = 0.0;
6041  heap_hdr.estimates.num_high_best = 0;
6042  heap_hdr.estimates.num_other_high_best = 0;
6043  heap_hdr.estimates.head = 0;
6044 
6045  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
6046  {
6047  VPID_SET_NULL (&heap_hdr.estimates.best[i].vpid);
     /* NOTE(review): best[0] inside a loop over i looks like it should be
      * best[i]; harmless in practice because the tail loop near the end of
      * this function zeroes the unused entries, but verify upstream. */
6048  heap_hdr.estimates.best[0].freespace = 0;
6049  }
6050 
6051  /* Initialize second best estimates */
6052  heap_hdr.estimates.num_second_best = 0;
6053  heap_hdr.estimates.head_second_best = 0;
6054  heap_hdr.estimates.tail_second_best = 0;
6055  heap_hdr.estimates.num_substitutions = 0;
6056 
6057  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
6058  {
6059  VPID_SET_NULL (&heap_hdr.estimates.second_best[i]);
6060  }
6061 
6062  /* initialize full_search_vpid */
     /* NOTE(review): original lines 6063-6064 are missing from this
      * extraction; presumably they set heap_hdr.estimates.full_search_vpid
      * to the heap's first page -- confirm against the repository source. */
6065 
6066  best = 0;
6067 
     /* Walk the heap pages from the last page backwards, following the
      * previous-page chain.  The chain link is read before the page may be
      * deallocated below. */
6068  while (!(VPID_ISNULL (&prv_vpid)))
6069  {
6070  vpid = prv_vpid;
6071  curr_page_watcher.pgptr =
6072  heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, X_LOCK, NULL, &curr_page_watcher);
6073  if (curr_page_watcher.pgptr == NULL)
6074  {
6075  goto exit_on_error;
6076  }
6077 
6078  (void) pgbuf_check_page_ptype (thread_p, curr_page_watcher.pgptr, PAGE_HEAP);
6079 
6080  if (heap_vpid_prev (thread_p, hfid, curr_page_watcher.pgptr, &prv_vpid) != NO_ERROR)
6081  {
6082  pgbuf_ordered_unfix (thread_p, &curr_page_watcher);
6083 
6084  goto exit_on_error;
6085  }
6086 
6087  /*
6088  * Are there any objects in this page ?
6089  * Compare against > 1 since every heap page contains a header record
6090  * (heap header or chain).
6091  */
6092 
6093  if (spage_number_of_records (curr_page_watcher.pgptr) > 1
6094  || (vpid.pageid == hfid->hpgid && vpid.volid == hfid->vfid.volid))
6095  {
6096  if (spage_reclaim (thread_p, curr_page_watcher.pgptr) == true)
6097  {
6098  addr.pgptr = curr_page_watcher.pgptr;
6099  /*
6100  * If this function is called correctly (see the notes in the
6101  * header comment about the preconditions) we can skip the
6102  * logging of spage_reclaim (). Logging for REDO would add many
6103  * log records for any compactdb operation and would only
6104  * benefit the infrequent scenario of compactdb operations that
6105  * crash right at the end. UNDO operations are not absolutely
6106  * required because the deleted OIDs should be unreferenced
6107  * anyway; there should be no harm in reusing them. Basically,
6108  * since the call to spage_reclaim () should leave the database
6109  * logically unmodified, neither REDO nor UNDO are required.
6110  */
6111  log_skip_logging (thread_p, &addr);
6112  pgbuf_set_dirty (thread_p, curr_page_watcher.pgptr, DONT_FREE);
6113  }
6114  }
6115 
6116  /*
6117  * Throw away the page if it doesn't contain any object. The header of
6118  * the heap cannot be thrown.
6119  */
6120 
6121  if (!(vpid.pageid == hfid->hpgid && vpid.volid == hfid->vfid.volid)
6122  && spage_number_of_records (curr_page_watcher.pgptr) <= 1
6123  /* Is any vacuum required? */
6124  && vacuum_is_mvccid_vacuumed (heap_page_get_max_mvccid (thread_p, curr_page_watcher.pgptr)))
6125  {
6126  /*
6127  * This page can be thrown away
6128  */
6129  pgbuf_ordered_unfix (thread_p, &curr_page_watcher);
6130  if (heap_vpid_remove (thread_p, hfid, &heap_hdr, &vpid) == NULL)
6131  {
6132  goto exit_on_error;
6133  }
     /* NOTE(review): original line 6135 (the argument continuation of this
      * vacuum_er_log call) is missing from this extraction. */
6134  vacuum_er_log (VACUUM_ER_LOG_HEAP, "Compactdb removed page %d|%d from heap file (%d, %d|%d).\n",
6136  }
6137  else
6138  {
     /* Page is kept: fold its record counts and free space back into the
      * freshly rebuilt header statistics. */
6139  spage_collect_statistics (curr_page_watcher.pgptr, &npages, &nrecords, &rec_length);
6140 
6141  heap_hdr.estimates.num_pages += npages;
6142  heap_hdr.estimates.num_recs += nrecords;
6143  heap_hdr.estimates.recs_sumlen += rec_length;
6144 
6145  free_space = spage_get_free_space_without_saving (thread_p, curr_page_watcher.pgptr, &need_update);
6146 
6147  if (free_space > HEAP_DROP_FREE_SPACE)
6148  {
6149  if (best < HEAP_NUM_BEST_SPACESTATS)
6150  {
6151  heap_hdr.estimates.best[best].vpid = vpid;
6152  heap_hdr.estimates.best[best].freespace = free_space;
6153  best++;
6154  }
6155  else
6156  {
6157  heap_hdr.estimates.num_other_high_best++;
6158  heap_stats_put_second_best (&heap_hdr, &vpid);
6159  }
6160 
     /* NOTE(review): original line 6161 (the condition guarding this block,
      * presumably a prm_get_* check for the best-space cache) is missing
      * from this extraction. */
6162  {
6163  (void) heap_stats_add_bestspace (thread_p, hfid, &vpid, free_space);
6164  }
6165  }
6166 
6167  pgbuf_ordered_unfix (thread_p, &curr_page_watcher);
6168  }
6169  }
6170 
6171  heap_hdr.estimates.num_high_best = best;
6172  /*
6173  * Set the rest of the statistics to NULL
6174  */
6175  for (; best < HEAP_NUM_BEST_SPACESTATS; best++)
6176  {
6177  VPID_SET_NULL (&heap_hdr.estimates.best[best].vpid);
6178  heap_hdr.estimates.best[best].freespace = 0;
6179  }
6180 
6181  /* Log the desired changes.. and then change the header We need to log the header changes in order to always benefit
6182  * from the updated statistics and in order to avoid referencing deleted pages in the statistics. */
6183  addr.pgptr = hdr_page_watcher.pgptr;
     /* NOTE(review): original line 6184 is missing from this extraction
      * (likely an assert on the header page watcher). */
6185  log_append_undoredo_data (thread_p, RVHF_STATS, &addr, sizeof (HEAP_HDR_STATS), sizeof (HEAP_HDR_STATS),
6186  &initial_heap_hdr, hdr_recdes.data);
6187 
6188  /* Now update the statistics */
6189  if (spage_update (thread_p, hdr_page_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes) != SP_SUCCESS)
6190  {
6191  goto exit_on_error;
6192  }
6193 
6194  pgbuf_ordered_set_dirty_and_free (thread_p, &hdr_page_watcher);
6195 
6196  return ret;
6197 
6198 exit_on_error:
6199 
     /* The current page watcher is always released before jumping here; only
      * the header page may still be fixed. */
6200  if (hdr_page_watcher.pgptr != NULL)
6201  {
6202  pgbuf_ordered_unfix (thread_p, &hdr_page_watcher);
6203  }
6204 
6205  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
6206 }
6207 
6208 /*
6209  * heap_ovf_find_vfid () - Find overflow file identifier
6210  * return: ovf_vfid or NULL
6211  * hfid(in): Object heap file identifier
6212  * ovf_vfid(in/out): Overflow file identifier.
6213  * docreate(in): true/false. If true and the overflow file does not
6214  * exist, it is created.
6215  *
6216  * Note: Find overflow file identifier. If the overflow file does not
6217  * exist, it may be created depending of the value of argument create.
6218  */
6219 VFID *
6220 heap_ovf_find_vfid (THREAD_ENTRY * thread_p, const HFID * hfid, VFID * ovf_vfid, bool docreate,
6221  PGBUF_LATCH_CONDITION latch_cond)
6222 {
6223  HEAP_HDR_STATS *heap_hdr; /* Header of heap structure */
6224  LOG_DATA_ADDR addr_hdr; /* Address of logging data */
6225  VPID vpid; /* Page-volume identifier */
6226  RECDES hdr_recdes; /* Header record descriptor */
6227  PGBUF_LATCH_MODE mode;
6228 
6229  addr_hdr.vfid = &hfid->vfid;
     /* NOTE(review): original line 6230 is missing from this extraction;
      * presumably it sets addr_hdr.offset (the log address must be fully
      * initialized before the log_append_* calls below) -- confirm against
      * the repository source. */
6231 
6232  /* Read the header page */
6233  vpid.volid = hfid->vfid.volid;
6234  vpid.pageid = hfid->hpgid;
6235 
     /* A write latch is only needed when we may create the overflow file and
      * update the header; a read latch suffices otherwise. */
6236  mode = (docreate == true ? PGBUF_LATCH_WRITE : PGBUF_LATCH_READ);
6237  addr_hdr.pgptr = pgbuf_fix (thread_p, &vpid, OLD_PAGE, mode, latch_cond);
6238  if (addr_hdr.pgptr == NULL)
6239  {
6240  /* something went wrong, return */
6241  return NULL;
6242  }
6243 
6244  (void) pgbuf_check_page_ptype (thread_p, addr_hdr.pgptr, PAGE_HEAP);
6245 
6246  /* Peek the header record */
6247 
6248  if (spage_get_record (thread_p, addr_hdr.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, PEEK) != S_SUCCESS)
6249  {
6250  pgbuf_unfix_and_init (thread_p, addr_hdr.pgptr);
6251  return NULL;
6252  }
6253 
6254  heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data;
6255  if (VFID_ISNULL (&heap_hdr->ovf_vfid))
6256  {
6257  if (docreate == true)
6258  {
6259  FILE_DESCRIPTORS des;
6260  /* Create the overflow file. Try to create the overflow file in the same volume where the heap was defined */
6261 
6262  /* START A TOP SYSTEM OPERATION */
6263  log_sysop_start (thread_p);
6264 
6265  /* Initialize description of overflow heap file */
6266  memset (&des, 0, sizeof (des));
6267  HFID_COPY (&des.heap_overflow.hfid, hfid);
6268  des.heap_overflow.class_oid = heap_hdr->class_oid;
6269  if (file_create_with_npages (thread_p, FILE_MULTIPAGE_OBJECT_HEAP, 1, &des, ovf_vfid) == NO_ERROR)
6270  {
6271  /* Log undo, then redo */
6272  log_append_undo_data (thread_p, RVHF_STATS, &addr_hdr, sizeof (*heap_hdr), heap_hdr);
6273  VFID_COPY (&heap_hdr->ovf_vfid, ovf_vfid);
6274  log_append_redo_data (thread_p, RVHF_STATS, &addr_hdr, sizeof (*heap_hdr), heap_hdr);
6275  pgbuf_set_dirty (thread_p, addr_hdr.pgptr, DONT_FREE);
6276 
6277  log_sysop_commit (thread_p);
6278  }
6279  else
6280  {
     /* Creation failed: roll back the system operation and report
      * failure through a NULL return. */
6281  log_sysop_abort (thread_p);
6282  ovf_vfid = NULL;
6283  }
6284  }
6285  else
6286  {
     /* No overflow file exists and we were asked not to create one. */
6287  ovf_vfid = NULL;
6288  }
6289  }
6290  else
6291  {
6292  VFID_COPY (ovf_vfid, &heap_hdr->ovf_vfid);
6293  }
6294 
6295  pgbuf_unfix_and_init (thread_p, addr_hdr.pgptr);
6296 
6297  return ovf_vfid;
6298 }
6299 
6300 /*
6301  * heap_ovf_insert () - Insert the content of a multipage object in overflow
6302  * return: OID *(ovf_oid on success or NULL on failure)
6303  * hfid(in): Object heap file identifier
6304  * ovf_oid(in/out): Overflow address
6305  * recdes(in): Record descriptor
6306  *
6307  * Note: Insert the content of a multipage object in overflow.
6308  */
6309 static OID *
6310 heap_ovf_insert (THREAD_ENTRY * thread_p, const HFID * hfid, OID * ovf_oid, RECDES * recdes)
6311 {
6312  VFID ovf_vfid;
6313  VPID ovf_vpid; /* Address of overflow insertion */
6314 
6315  if (heap_ovf_find_vfid (thread_p, hfid, &ovf_vfid, true, PGBUF_UNCONDITIONAL_LATCH) == NULL
6316  || overflow_insert (thread_p, &ovf_vfid, &ovf_vpid, recdes, FILE_MULTIPAGE_OBJECT_HEAP) != NO_ERROR)
6317  {
6318  return NULL;
6319  }
6320 
6321  ovf_oid->pageid = ovf_vpid.pageid;
6322  ovf_oid->volid = ovf_vpid.volid;
6323  ovf_oid->slotid = NULL_SLOTID; /* Irrelevant */
6324 
6325  return ovf_oid;
6326 }
6327 
6328 /*
6329  * heap_ovf_update () - Update the content of a multipage object
6330  * return: OID *(ovf_oid on success or NULL on failure)
6331  * hfid(in): Object heap file identifier
6332  * ovf_oid(in): Overflow address
6333  * recdes(in): Record descriptor
6334  *
6335  * Note: Update the content of a multipage object.
6336  */
6337 static const OID *
6338 heap_ovf_update (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * ovf_oid, RECDES * recdes)
6339 {
6340  VFID ovf_vfid;
6341  VPID ovf_vpid;
6342 
6343  if (heap_ovf_find_vfid (thread_p, hfid, &ovf_vfid, false, PGBUF_UNCONDITIONAL_LATCH) == NULL)
6344  {
6345  return NULL;
6346  }
6347 
6348  ovf_vpid.pageid = ovf_oid->pageid;
6349  ovf_vpid.volid = ovf_oid->volid;
6350 
6351  if (overflow_update (thread_p, &ovf_vfid, &ovf_vpid, recdes, FILE_MULTIPAGE_OBJECT_HEAP) != NO_ERROR)
6352  {
6353  ASSERT_ERROR ();
6354  return NULL;
6355  }
6356  else
6357  {
6358  return ovf_oid;
6359  }
6360 }
6361 
6362 /*
6363  * heap_ovf_delete () - Delete the content of a multipage object
6364  * return: OID *(ovf_oid on success or NULL on failure)
6365  * hfid(in): Object heap file identifier
6366  * ovf_oid(in): Overflow address
6367  * ovf_vfid_p(in): Overflow file identifier. If given argument is NULL,
6368  * it must be obtained from heap file header.
6369  *
6370  * Note: Delete the content of a multipage object.
6371  */
6372 const OID *
6373 heap_ovf_delete (THREAD_ENTRY * thread_p, const HFID * hfid, const OID * ovf_oid, VFID * ovf_vfid_p)
6374 {
6375  VFID ovf_vfid;
6376  VPID ovf_vpid;
6377 
6378  if (ovf_vfid_p == NULL || VFID_ISNULL (ovf_vfid_p))
6379  {
6380  /* Get overflow file VFID from heap file header. */
6381  ovf_vfid_p = (ovf_vfid_p != NULL) ? ovf_vfid_p : &ovf_vfid;
6382  if (heap_ovf_find_vfid (thread_p, hfid, ovf_vfid_p, false, PGBUF_UNCONDITIONAL_LATCH) == NULL)
6383  {
6384  return NULL;
6385  }
6386  }
6387 
6388  ovf_vpid.pageid = ovf_oid->pageid;
6389  ovf_vpid.volid = ovf_oid->volid;
6390 
6391  if (overflow_delete (thread_p, ovf_vfid_p, &ovf_vpid) == NULL)
6392  {
6393  return NULL;
6394  }
6395  else
6396  {
6397  return ovf_oid;
6398  }
6399 
6400 }
6401 
6402 /*
6403  * heap_ovf_flush () - Flush all overflow dirty pages where the object resides
6404  * return: NO_ERROR
6405  * ovf_oid(in): Overflow address
6406  *
6407  * Note: Flush all overflow dirty pages where the object resides.
6408  */
6409 static int
6410 heap_ovf_flush (THREAD_ENTRY * thread_p, const OID * ovf_oid)
6411 {
6412  VPID ovf_vpid;
6413 
6414  ovf_vpid.pageid = ovf_oid->pageid;
6415  ovf_vpid.volid = ovf_oid->volid;
6416  overflow_flush (thread_p, &ovf_vpid);
6417 
6418  return NO_ERROR;
6419 }
6420 
6421 /*
6422  * heap_ovf_get_length () - Find length of overflow object
6423  * return: length
6424  * ovf_oid(in): Overflow address
6425  *
6426  * Note: The length of the content of a multipage object associated
6427  * with the given overflow address is returned. In the case of
6428  * any error, -1 is returned.
6429  */
6430 static int
6431 heap_ovf_get_length (THREAD_ENTRY * thread_p, const OID * ovf_oid)
6432 {
6433  VPID ovf_vpid;
6434 
6435  ovf_vpid.pageid = ovf_oid->pageid;
6436  ovf_vpid.volid = ovf_oid->volid;
6437 
6438  return overflow_get_length (thread_p, &ovf_vpid);
6439 }
6440 
6441 /*
6442  * heap_ovf_get () - get/retrieve the content of a multipage object from overflow
6443  * return: SCAN_CODE
6444  * (Either of S_SUCCESS, S_DOESNT_FIT, S_END)
6445  * ovf_oid(in): Overflow address
6446  * recdes(in): Record descriptor
6447  * chn(in):
6448  *
6449  * Note: The content of a multipage object associated with the given
6450  * overflow address(oid) is placed into the area pointed to by
6451  * the record descriptor. If the content of the object does not
6452  * fit in such an area (i.e., recdes->area_size), an error is
6453  * returned and a hint of its length is returned as a negative
6454  * value in recdes->length. The length of the retrieved object is
6455  * set in the the record descriptor (i.e., recdes->length).
6456  */
6457 static SCAN_CODE
6458 heap_ovf_get (THREAD_ENTRY * thread_p, const OID * ovf_oid, RECDES * recdes, int chn, MVCC_SNAPSHOT * mvcc_snapshot)
6459 {
6460  VPID ovf_vpid;
6461  int rest_length;
6462  SCAN_CODE scan;
6463 
6464  ovf_vpid.pageid = ovf_oid->pageid;
6465  ovf_vpid.volid = ovf_oid->volid;
6466 
6467  if (chn != NULL_CHN)
6468  {
6469  /*
6470  * This assumes that most of the time, we have the right cache coherency
6471  * number and that it is expensive to copy the overflow object to be
6472  * thrown most of the time. Thus, it is OK to do some extra page look up
6473  * when failures (it should be OK since the overflow page should be
6474  * already in the page buffer pool.
6475  */
6476 
6477  scan = overflow_get_nbytes (thread_p, &ovf_vpid, recdes, 0, OR_MVCC_MAX_HEADER_SIZE, &rest_length, mvcc_snapshot);
6478  if (scan == S_SUCCESS && chn == or_chn (recdes))
6479  {
6480  return S_SUCCESS_CHN_UPTODATE;
6481  }
6482  }
6483  scan = overflow_get (thread_p, &ovf_vpid, recdes, mvcc_snapshot);
6484 
6485  return scan;
6486 }
6487 
6488 /*
6489  * heap_ovf_get_capacity () - Find space consumed oveflow object
6490  * return: NO_ERROR
6491  * ovf_oid(in): Overflow address
6492  * ovf_len(out): Length of overflow object
6493  * ovf_num_pages(out): Total number of overflow pages
6494  * ovf_overhead(out): System overhead for overflow record
6495  * ovf_free_space(out): Free space for exapnsion of the overflow rec
6496  *
6497  * Note: Find the current storage facts/capacity of given overflow rec
6498  */
6499 static int
6500 heap_ovf_get_capacity (THREAD_ENTRY * thread_p, const OID * ovf_oid, int *ovf_len, int *ovf_num_pages,
6501  int *ovf_overhead, int *ovf_free_space)
6502 {
6503  VPID ovf_vpid;
6504 
6505  ovf_vpid.pageid = ovf_oid->pageid;
6506  ovf_vpid.volid = ovf_oid->volid;
6507 
6508  return overflow_get_capacity (thread_p, &ovf_vpid, ovf_len, ovf_num_pages, ovf_overhead, ovf_free_space);
6509 }
6510 
6511 /*
6512  * heap_scancache_check_with_hfid () - Check if scancache is on provided HFID
6513  * and reinitialize it otherwise
6514  * thread_p(in): thread entry
6515  * hfid(in): heap file identifier to check the scancache against
6516  * scan_cache(in/out): pointer to scancache pointer
6517  * returns: error code or NO_ERROR
6518  *
6519  * NOTE: Function may alter the scan cache address. Caller must make sure it
6520  * doesn't pass it's only reference to the object OR it is not the owner
6521  * of the object.
6522  * NOTE: Function may alter the members of (*scan_cache).
6523  */
6524 static int
6525 heap_scancache_check_with_hfid (THREAD_ENTRY * thread_p, HFID * hfid, OID * class_oid, HEAP_SCANCACHE ** scan_cache)
6526 {
6527  if (*scan_cache != NULL)
6528  {
6529  if ((*scan_cache)->debug_initpattern != HEAP_DEBUG_SCANCACHE_INITPATTERN)
6530  {
6531  er_log_debug (ARG_FILE_LINE, "heap_insert: Your scancache is not initialized");
6532  *scan_cache = NULL;
6533  }
6534  else if (!HFID_EQ (&(*scan_cache)->node.hfid, hfid) || OID_ISNULL (&(*scan_cache)->node.class_oid))
6535  {
6536  int r;
6537 
6538  /* scancache is not on our heap file, reinitialize it */
6539  /* this is a very dangerous thing to do and is very risky. the caller may have done a big mistake.
6540  * we could use it as backup for release run, but we should catch it on debug.
6541  * todo: add assert (false); here
6542  */
6543  r = heap_scancache_reset_modify (thread_p, *scan_cache, hfid, class_oid);
6544  if (r != NO_ERROR)
6545  {
6546  return r;
6547  }
6548  }
6549  }
6550 
6551  /* all ok */
6552  return NO_ERROR;
6553 }
6554 
6555 /*
6556  * heap_scancache_start_internal () - Start caching information for a heap scan
6557  * return: NO_ERROR
6558  * scan_cache(in/out): Scan cache
6559  * hfid(in): Heap file identifier of the scan cache or NULL
6560  * If NULL is given heap_get is the only function that can
6561  * be used with the scan cache.
6562  * class_oid(in): Class identifier of scan cache
6563  * For any class, NULL or NULL_OID can be given
6564  * cache_last_fix_page(in): Wheater or not to cache the last fetched page
6565  * between scan objects ?
6566  * is_queryscan(in):
6567  * is_indexscan(in):
6568  *
6569  */
6570 static int
6571 heap_scancache_start_internal (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, const HFID * hfid,
6572  const OID * class_oid, int cache_last_fix_page, bool is_queryscan, int is_indexscan,
     /* NOTE(review): original line 6573 (the final parameter line, presumably
      * "MVCC_SNAPSHOT * mvcc_snapshot)") is missing from this extraction --
      * confirm against the repository source. */
6574 {
6575  int ret = NO_ERROR;
6576 
6577  if (class_oid != NULL)
6578  {
6579  /*
6580  * Scanning the instances of a specific class
6581  */
6582  scan_cache->node.class_oid = *class_oid;
6583 
6584  if (is_queryscan == true)
6585  {
6586  /*
6587  * Acquire a lock for the heap scan so that the class is not updated
6588  * during the scan of the heap. This can happen in transaction isolation
6589  * levels that release the locks of the class when the class is read.
6590  */
6591  if (lock_scan (thread_p, class_oid, LK_UNCOND_LOCK, IS_LOCK) != LK_GRANTED)
6592  {
6593  goto exit_on_error;
6594  }
6595  }
6596 
     /* Resolve the heap file and file type from the class OID; the hfid
      * argument, when given, is only cross-checked by the assert below. */
6597  ret =
6598  heap_get_hfid_and_file_type_from_class_oid (thread_p, class_oid, &scan_cache->node.hfid,
6599  &scan_cache->file_type);
6600  if (ret != NO_ERROR)
6601  {
6602  ASSERT_ERROR ();
6603  return ret;
6604  }
6605  assert (hfid == NULL || HFID_EQ (hfid, &scan_cache->node.hfid));
6606  assert (scan_cache->file_type == FILE_HEAP || scan_cache->file_type == FILE_HEAP_REUSE_SLOTS);
6607  }
6608  else
6609  {
6610  /*
6611  * Scanning the instances of any class in the heap
6612  */
6613  OID_SET_NULL (&scan_cache->node.class_oid);
6614 
6615  if (hfid == NULL)
6616  {
6617  HFID_SET_NULL (&scan_cache->node.hfid);
6618  scan_cache->node.hfid.vfid.volid = NULL_VOLID;
6619  scan_cache->file_type = FILE_UNKNOWN_TYPE;
6620  }
6621  else
6622  {
6623  scan_cache->node.hfid.vfid.volid = hfid->vfid.volid;
6624  scan_cache->node.hfid.vfid.fileid = hfid->vfid.fileid;
6625  scan_cache->node.hfid.hpgid = hfid->hpgid;
6626  if (file_get_type (thread_p, &hfid->vfid, &scan_cache->file_type) != NO_ERROR)
6627  {
6628  ASSERT_ERROR ();
6629  goto exit_on_error;
6630  }
6631  if (scan_cache->file_type == FILE_UNKNOWN_TYPE)
6632  {
6633  assert_release (false);
6634  goto exit_on_error;
6635  }
6636  }
6637  }
6638 
     /* Scans read pages with shared latches by default. */
6639  scan_cache->page_latch = S_LOCK;
6640 
6641  scan_cache->cache_last_fix_page = cache_last_fix_page;
     /* NOTE(review): original line 6642 is missing from this extraction
      * (likely page-watcher or debug_initpattern initialization) -- confirm
      * against the repository source. */
6643  scan_cache->area = NULL;
6644  scan_cache->area_size = -1;
6645  scan_cache->num_btids = 0;
6646  scan_cache->index_stat_info = NULL;
6647 
     /* NOTE(review): original line 6648 is missing from this extraction --
      * confirm against the repository source. */
6649  scan_cache->mvcc_snapshot = mvcc_snapshot;
6650  scan_cache->partition_list = NULL;
6651 
6652  return ret;
6653 
6654 exit_on_error:
6655 
     /* On failure, leave the scancache in a fully neutralized state so a
      * later heap_scancache_end-style call cannot misinterpret it. */
6656  HFID_SET_NULL (&scan_cache->node.hfid);
6657  scan_cache->node.hfid.vfid.volid = NULL_VOLID;
6658  OID_SET_NULL (&scan_cache->node.class_oid);
6659  scan_cache->page_latch = NULL_LOCK;
6660  scan_cache->cache_last_fix_page = false;
     /* NOTE(review): original line 6661 is missing from this extraction --
      * confirm against the repository source. */
6662  scan_cache->area = NULL;
6663  scan_cache->area_size = 0;
6664  scan_cache->num_btids = 0;
6665  scan_cache->index_stat_info = NULL;
6666  scan_cache->file_type = FILE_UNKNOWN_TYPE;
6667  scan_cache->debug_initpattern = 0;
6668  scan_cache->mvcc_snapshot = NULL;
6669  scan_cache->partition_list = NULL;
6670 
6671  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
6672 }
6673 
6674 /*
6675  * heap_scancache_start () - Start caching information for a heap scan
6676  * return: NO_ERROR
6677  * scan_cache(in/out): Scan cache
6678  * hfid(in): Heap file identifier of the scan cache or NULL
6679  * If NULL is given heap_get is the only function that can
6680  * be used with the scan cache.
6681  * class_oid(in): Class identifier of scan cache
6682  * For any class, NULL or NULL_OID can be given
6683  * cache_last_fix_page(in): Wheater or not to cache the last fetched page
6684  * between scan objects ?
6685  * is_indexscan(in):
6686  *
6687  */
6688 int
6689 heap_scancache_start (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, const HFID * hfid, const OID * class_oid,
6690  int cache_last_fix_page, int is_indexscan, MVCC_SNAPSHOT * mvcc_snapshot)
6691 {
6692  return heap_scancache_start_internal (thread_p, scan_cache, hfid, class_oid, cache_last_fix_page, true, is_indexscan,
6693  mvcc_snapshot);
6694 }
6695 
6696 /*
6697  * heap_scancache_start_modify () - Start caching information for heap
6698  * modifications
6699  * return: NO_ERROR
6700  * scan_cache(in/out): Scan cache
6701  * hfid(in): Heap file identifier of the scan cache or NULL
6702  * If NULL is given heap_get is the only function that can
6703  * be used with the scan cache.
6704  * class_oid(in): Class identifier of scan cache
6705  * For any class, NULL or NULL_OID can be given
6706  * op_type(in):
6707  *
6708  * Note: A scancache structure is started for heap modifications.
6709  * The scan_cache structure is used to modify objects of the heap
6710  * with heap_insert, heap_update, and heap_delete. The scan structure
6711  * is used to cache information about the latest used page which
6712  * can be used by the following function to guess where to insert
6713  * objects, or other updates and deletes on the same page.
6714  * Good when we are updating things in a sequential way.
6715  *
6716  * The heap manager automatically resets the scan_cache structure
6717  * when it is used with a different heap. That is, the scan_cache
6718  * is reset with the heap and class of the insertion, update, and
6719  * delete. Therefore, you could pass NULLs to hfid, and class_oid
6720  * to this function, but that it is not recommended.
6721  */
6722 int
/* NOTE(review): original line 6723 (the first line of this signature,
 * presumably "heap_scancache_start_modify (THREAD_ENTRY * thread_p,
 * HEAP_SCANCACHE * scan_cache, const HFID * hfid,") is missing from this
 * extraction -- confirm against the repository source. */
6724  const OID * class_oid, int op_type, MVCC_SNAPSHOT * mvcc_snapshot)
6725 {
6726  OR_CLASSREP *classrepr = NULL;
6727  int classrepr_cacheindex = -1;
6728  int malloc_size, i;
6729  int ret = NO_ERROR;
6730 
     /* Start a plain scancache first (no class binding, no page caching),
      * then bind it to the class below when one was given. */
6731  if (heap_scancache_start_internal (thread_p, scan_cache, hfid, NULL, false, false, false, mvcc_snapshot) != NO_ERROR)
6732  {
6733  goto exit_on_error;
6734  }
6735 
6736  if (class_oid != NULL)
6737  {
6738  ret = heap_scancache_reset_modify (thread_p, scan_cache, hfid, class_oid);
6739  if (ret != NO_ERROR)
6740  {
6741  goto exit_on_error;
6742  }
6743  }
6744  else
6745  {
     /* Modifications latch pages exclusively. */
6746  scan_cache->page_latch = X_LOCK;
6747  }
6748 
     /* For multi-row operations on a real (non-root) class, pre-allocate
      * per-index unique statistics so btree updates can be accumulated. */
6749  if (BTREE_IS_MULTI_ROW_OP (op_type) && class_oid != NULL && !OID_EQ (class_oid, oid_Root_class_oid))
6750  {
6751  /* get class representation to find the total number of indexes */
6752  classrepr = heap_classrepr_get (thread_p, (OID *) class_oid, NULL, NULL_REPRID, &classrepr_cacheindex);
6753  if (classrepr == NULL)
6754  {
6755  goto exit_on_error;
6756  }
6757  scan_cache->num_btids = classrepr->n_indexes;
6758 
6759  if (scan_cache->num_btids > 0)
6760  {
6761  /* allocate local btree statistical information structure */
6762  malloc_size = sizeof (BTREE_UNIQUE_STATS) * scan_cache->num_btids;
6763 
6764  if (scan_cache->index_stat_info != NULL)
6765  {
6766  db_private_free (thread_p, scan_cache->index_stat_info);
6767  }
6768 
6769  scan_cache->index_stat_info = (BTREE_UNIQUE_STATS *) db_private_alloc (thread_p, malloc_size);
6770  if (scan_cache->index_stat_info == NULL)
6771  {
     /* NOTE(review): original line 6772 is missing from this extraction
      * (likely an out-of-memory er_set or similar) -- confirm against the
      * repository source. */
6773  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
6774  goto exit_on_error;
6775  }
6776  /* initialize the structure */
6777  for (i = 0; i < scan_cache->num_btids; i++)
6778  {
6779  BTID_COPY (&(scan_cache->index_stat_info[i].btid), &(classrepr->indexes[i].btid));
6780  scan_cache->index_stat_info[i].num_nulls = 0;
6781  scan_cache->index_stat_info[i].num_keys = 0;
6782  scan_cache->index_stat_info[i].num_oids = 0;
6783  }
6784  }
6785 
6786  /* free class representation */
6787  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
6788  }
6789 
6790  /* In case of SINGLE_ROW_INSERT, SINGLE_ROW_UPDATE, SINGLE_ROW_DELETE, or SINGLE_ROW_MODIFY, the 'num_btids' and
6791  * 'index_stat_info' of scan cache structure have to be set as 0 and NULL, respectively. */
6792 
6793  return ret;
6794 
6795 exit_on_error:
6796 
6797  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
6798 }
6799 
6800 /*
6801  * heap_scancache_force_modify () -
6802  * return: NO_ERROR
6803  * scan_cache(in):
6804  */
6805 static int
6806 heap_scancache_force_modify (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache)
6807 {
6808  if (scan_cache == NULL || scan_cache->debug_initpattern != HEAP_DEBUG_SCANCACHE_INITPATTERN)
6809  {
6810  return NO_ERROR;
6811  }
6812 
6813  /* Free fetched page */
6814  if (scan_cache->page_watcher.pgptr != NULL)
6815  {
6816  pgbuf_ordered_unfix (thread_p, &(scan_cache->page_watcher));
6817  }
6818 
6819  return NO_ERROR;
6820 }
6821 
6822 /*
6823  * heap_scancache_reset_modify () - Reset the current caching information
6824  * return: NO_ERROR
6825  * scan_cache(in/out): Scan cache
6826  * hfid(in): Heap file identifier of the scan cache
6827  * class_oid(in): Class identifier of scan cache
6828  *
6829  * Note: Any page that has been cached under the current scan cache is
6830  * freed and the scancache structure is reinitialized with the
6831  * new information.
6832  */
6833 static int
6834 heap_scancache_reset_modify (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, const HFID * hfid,
6835  const OID * class_oid)
6836 {
6837  int ret;
6838 
6839  ret = heap_scancache_force_modify (thread_p, scan_cache);
6840  if (ret != NO_ERROR)
6841  {
6842  return ret;
6843  }
6844 
6845  if (class_oid != NULL)
6846  {
6847  if (!OID_EQ (class_oid, &scan_cache->node.class_oid))
6848  {
6849  ret =
6850  heap_get_hfid_and_file_type_from_class_oid (thread_p, class_oid, &scan_cache->node.hfid,
6851  &scan_cache->file_type);
6852  if (ret != NO_ERROR)
6853  {
6854  ASSERT_ERROR ();
6855  return ret;
6856  }
6857  assert (HFID_EQ (&scan_cache->node.hfid, hfid));
6858  scan_cache->node.class_oid = *class_oid;
6859  }
6860  }
6861  else
6862  {
6863  OID_SET_NULL (&scan_cache->node.class_oid);
6864 
6865  if (!HFID_EQ (&scan_cache->node.hfid, hfid))
6866  {
6867  scan_cache->node.hfid.vfid.volid = hfid->vfid.volid;
6868  scan_cache->node.hfid.vfid.fileid = hfid->vfid.fileid;
6869  scan_cache->node.hfid.hpgid = hfid->hpgid;
6870 
6871  ret = file_get_type (thread_p, &hfid->vfid, &scan_cache->file_type);
6872  if (ret != NO_ERROR)
6873  {
6874  ASSERT_ERROR ();
6875  return ret;
6876  }
6877  if (scan_cache->file_type == FILE_UNKNOWN_TYPE)
6878  {
6879  assert_release (false);
6880  return ER_FAILED;
6881  }
6882  }
6883  }
6884  scan_cache->page_latch = X_LOCK;
6885 
6886  return ret;
6887 }
6888 
6889 /*
6890  * heap_scancache_quick_start () - Start caching information for a heap scan
6891  * return: NO_ERROR
6892  * scan_cache(in/out): Scan cache
6893  *
6894  * Note: This is a quick way to initialize a scancahe structure. It
6895  * should be used only when we would like to peek only one object
6896  * (heap_get). This function will cache the last fetched page by default.
6897  *
6898  * This function was created to avoid some of the overhead
6899  * associated with scancahe(e.g., find best pages, lock the heap)
6900  * since we are not really scanning the heap.
6901  *
6902  * For other needs/uses, please refer to heap_scancache_start ().
6903  *
6904  * Note: Using many scancaches with the cached_fix page option at the
6905  * same time should be avoided since page buffers are fixed and
6906  * locked for future references and there is a limit of buffers
6907  * in the page buffer pool. This is analogous to fetching many
6908  * pages at the same time. The page buffer pool is expanded when
6909  * needed, however, developers must pay special attention to
6910  * avoid this situation.
6911  */
6912 int
/* NOTE(review): original line 6913 (this function's signature, presumably
 * "heap_scancache_quick_start (HEAP_SCANCACHE * scan_cache)") is missing
 * from this extraction -- confirm against the repository source. */
6914 {
     /* Quick start with no heap file bound; reads use shared page latches. */
6915  heap_scancache_quick_start_internal (scan_cache, NULL);
6916 
6917  scan_cache->page_latch = S_LOCK;
6918 
6919  return NO_ERROR;
6920 }
6921 
6922 /*
6923  * heap_scancache_quick_start_modify () - Start caching information
6924  * for a heap modifications
6925  * return: NO_ERROR
6926  * scan_cache(in/out): Scan cache
6927  */
6928 int
/* NOTE(review): original line 6929 (this function's signature, presumably
 * "heap_scancache_quick_start_modify (HEAP_SCANCACHE * scan_cache)") is
 * missing from this extraction -- confirm against the repository source. */
6930 {
     /* Same quick start, but modifications latch pages exclusively. */
6931  heap_scancache_quick_start_internal (scan_cache, NULL);
6932 
6933  scan_cache->page_latch = X_LOCK;
6934 
6935  return NO_ERROR;
6936 }
6937 
6938 /*
6939  * heap_scancache_quick_start_internal () -
6940  *
6941  * return: NO_ERROR
6942  * scan_cache(in/out): Scan cache
6943  */
6944 static int
6945 heap_scancache_quick_start_internal (HEAP_SCANCACHE * scan_cache, const HFID * hfid)
6946 {
     /* Minimal scancache initialization: bind the given heap file (or none)
      * and set every other member to a safe, empty default. */
6947  HFID_SET_NULL (&scan_cache->node.hfid);
6948  if (hfid == NULL)
6949  {
6950  scan_cache->node.hfid.vfid.volid = NULL_VOLID;
     /* NOTE(review): original line 6951 is missing from this extraction
      * (likely a file_type assignment for the no-hfid case) -- confirm
      * against the repository source. */
6952  }
6953  else
6954  {
6955  HFID_COPY (&scan_cache->node.hfid, hfid);
     /* NOTE(review): original line 6956 is missing from this extraction --
      * confirm against the repository source. */
6957  }
6958  OID_SET_NULL (&scan_cache->node.class_oid);
6959  scan_cache->page_latch = S_LOCK;
6960  scan_cache->cache_last_fix_page = true;
6961  scan_cache->area = NULL;
6962  scan_cache->area_size = 0;
6963  scan_cache->num_btids = 0;
6964  scan_cache->index_stat_info = NULL;
6965  scan_cache->file_type = FILE_UNKNOWN_TYPE;
     /* NOTE(review): original line 6966 is missing from this extraction
      * (likely "scan_cache->debug_initpattern = ..." since callers test that
      * field) -- confirm against the repository source. */
6967  scan_cache->mvcc_snapshot = NULL;
6968  scan_cache->partition_list = NULL;
6969 
6970  return NO_ERROR;
6971 }
6972 
6973 /*
6974  * heap_scancache_quick_end () - Stop caching information for a heap scan
6975  * return: NO_ERROR
6976  * scan_cache(in/out): Scan cache
6977  *
6978  * Note: Any fixed heap page on the given scan is freed and any memory
6979  * allocated by this scan is also freed. The scan_cache structure
6980  * is undefined. This function does not update any space statistics.
6981  */
static int
heap_scancache_quick_end (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache)
{
  int ret = NO_ERROR;

  /* NOTE(review): the controlling condition of this first branch is missing
   * from this copy of the file (it presumably validates
   * scan_cache->debug_initpattern) -- verify against upstream. */
    {
      er_log_debug (ARG_FILE_LINE, "heap_scancache_quick_end: Your scancache is not initialized");
      ret = ER_GENERIC_ERROR;
      /* NOTE(review): a statement appears to be missing here (line elided). */
    }
  else
    {
      if (scan_cache->index_stat_info != NULL)
	{
	  /* deallocate memory space allocated for index stat info. */
	  db_private_free_and_init (thread_p, scan_cache->index_stat_info);
	  scan_cache->num_btids = 0;
	}

      if (scan_cache->cache_last_fix_page == true)
	{
	  /* Free fetched page */
	  if (scan_cache->page_watcher.pgptr != NULL)
	    {
	      pgbuf_ordered_unfix (thread_p, &scan_cache->page_watcher);
	    }
	}

      /* Free memory */
      if (scan_cache->area)
	{
	  db_private_free_and_init (thread_p, scan_cache->area);
	}

      /* Release every node of the partition list, one at a time. */
      if (scan_cache->partition_list)
	{
	  HEAP_SCANCACHE_NODE_LIST *next_node = NULL;
	  HEAP_SCANCACHE_NODE_LIST *curr_node = NULL;

	  curr_node = scan_cache->partition_list;

	  while (curr_node != NULL)
	    {
	      next_node = curr_node->next;
	      db_private_free_and_init (thread_p, curr_node);
	      curr_node = next_node;
	    }
	}
    }

  /* Leave the scancache in a clean, re-initializable state regardless of the
   * branch taken above. */
  HFID_SET_NULL (&scan_cache->node.hfid);
  scan_cache->node.hfid.vfid.volid = NULL_VOLID;
  OID_SET_NULL (&scan_cache->node.class_oid);
  scan_cache->page_latch = NULL_LOCK;
  assert (PGBUF_IS_CLEAN_WATCHER (&(scan_cache->page_watcher)));
  scan_cache->area = NULL;
  scan_cache->area_size = 0;
  scan_cache->file_type = FILE_UNKNOWN_TYPE;
  scan_cache->debug_initpattern = 0;

  return ret;
}
7045 
7046 /*
7047  * heap_scancache_end_internal () -
7048  * return: NO_ERROR
7049  * scan_cache(in):
7050  * scan_state(in):
7051  */
static int
heap_scancache_end_internal (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, bool scan_state)
{
  int ret = NO_ERROR;

  /* NOTE(review): the controlling condition of this guard is missing from
   * this copy of the file (it presumably validates
   * scan_cache->debug_initpattern) -- verify against upstream. */
    {
      er_log_debug (ARG_FILE_LINE, "heap_scancache_end_internal: Your scancache is not initialized");
      return ER_FAILED;
    }

  /* scan_state (END_SCAN/CONTINUE_SCAN) is not referenced in the visible
   * code; the quick-end path is taken unconditionally. */
  ret = heap_scancache_quick_end (thread_p, scan_cache);

  return ret;
}
7067 
7068 /*
7069  * heap_scancache_end () - Stop caching information for a heap scan
7070  * return: NO_ERROR
7071  * scan_cache(in/out): Scan cache
7072  *
7073  * Note: Any fixed heap page on the given scan is freed and any memory
7074  * allocated by this scan is also freed. The scan_cache structure is undefined.
7075  */
7076 int
7078 {
7079  int ret;
7080 
7081  ret = heap_scancache_end_internal (thread_p, scan_cache, END_SCAN);
7082 
7083  return NO_ERROR;
7084 }
7085 
7086 /*
7087  * heap_scancache_end_when_scan_will_resume () -
7088  * return:
7089  * scan_cache(in):
7090  */
7091 int
7093 {
7094  int ret;
7095 
7096  ret = heap_scancache_end_internal (thread_p, scan_cache, CONTINUE_SCAN);
7097 
7098  return NO_ERROR;
7099 }
7100 
7101 /*
7102  * heap_scancache_end_modify () - End caching information for a heap
7103  * modification cache
7104  * return:
7105  * scan_cache(in/out): Scan cache
7106  *
7107  * Note: Any fixed heap page on the given scan is freed. The heap
7108  * best find space statistics for the heap are completely updated
7109  * with the ones stored in the scan cache.
7110  */
7111 void
7113 {
7114  int ret;
7115 
7116  ret = heap_scancache_force_modify (thread_p, scan_cache);
7117  if (ret == NO_ERROR)
7118  {
7119  ret = heap_scancache_quick_end (thread_p, scan_cache);
7120  }
7121 }
7122 
7123 /*
7124  * heap_get_if_diff_chn () - Get specified object of the given slotted page when
7125  * its cache coherency number is different
7126  * return: SCAN_CODE
7127  * (Either of S_SUCCESS,
7128  * S_SUCCESS_CHN_UPTODATE,
7129  * S_DOESNT_FIT,
7130  * S_DOESNT_EXIST)
7131  * pgptr(in): Pointer to slotted page
7132  * slotid(in): Slot identifier of current record.
7133  * recdes(in/out): Pointer to a record descriptor. Will be modified to
7134  * describe the desired record.
7135  * ispeeking(in): Indicates whether the record is going to be copied
7136  * (like a copy) or peeked (read at the buffer).
7137  * chn(in): Cache coherency number or NULL_CHN
7138  *
7139  * Note: If the given CHN is the same as the chn of the specified
7140  * object in the slotted page, the object may not be placed in
7141  * the given record descriptor. If the given CHN is NULL_CHN or
7142  * is not given, then the following process is followed depending
7143  * upon if we are peeking or not:
7144  * When ispeeking is PEEK, the desired record is peeked onto the
7145  * buffer pool. The address of the record descriptor is set
7146  * to the portion of the buffer pool where the record is stored.
7147  * For more information on peeking description, see the slotted module.
7148  *
7149  * When ispeeking is COPY, the desired record is read
7150  * onto the area pointed by the record descriptor. If the record
7151  * does not fit in such an area, the length of the record is
7152  * returned as a negative value in recdes->length and an error
7153  * condition is indicated.
7154  */
7155 static SCAN_CODE
7156 heap_get_if_diff_chn (THREAD_ENTRY * thread_p, PAGE_PTR pgptr, INT16 slotid, RECDES * recdes, bool ispeeking, int chn,
7158 {
7159  RECDES chn_recdes; /* Used when we need to compare the cache coherency number and we are not peeking */
7160  SCAN_CODE scan;
7161  MVCC_REC_HEADER mvcc_header;
7162 
7163  /*
7164  * Don't retrieve the object when the object has the same cache
7165  * coherency number given by the caller. That is, the caller has the
7166  * valid cached object.
7167  */
7168 
7169  if (ispeeking == PEEK)
7170  {
7171  scan = spage_get_record (thread_p, pgptr, slotid, recdes, PEEK);
7172  if (scan != S_SUCCESS)
7173  {
7174  return scan;
7175  }
7176 
7177  /* For MVCC we need to obtain header and verify header */
7178  or_mvcc_get_header (recdes, &mvcc_header);
7179  if (scan == S_SUCCESS && mvcc_snapshot != NULL && mvcc_snapshot->snapshot_fnc != NULL)
7180  {
7181  if (mvcc_snapshot->snapshot_fnc (thread_p, &mvcc_header, mvcc_snapshot) == TOO_OLD_FOR_SNAPSHOT)
7182  {
7183  /* consider snapshot is not satisified only in case of TOO_OLD_FOR_SNAPSHOT;
7184  * TOO_NEW_FOR_SNAPSHOT records should be accepted, e.g. a recently updated record, locked at select */
7185  return S_SNAPSHOT_NOT_SATISFIED;
7186  }
7187  }
7188  if (MVCC_IS_CHN_UPTODATE (&mvcc_header, chn))
7189  {
7190  /* Test chn if MVCC is disabled for record or if delete MVCCID is invalid and the record is inserted by
7191  * current transaction. */
7192  /* When testing chn is not required, the result is considered up-to-date. */
7193  scan = S_SUCCESS_CHN_UPTODATE;
7194  }
7195  }
7196  else
7197  {
7198  scan = spage_get_record (thread_p, pgptr, slotid, &chn_recdes, PEEK);
7199  if (scan != S_SUCCESS)
7200  {
7201  return scan;
7202  }
7203 
7204  /* For MVCC we need to obtain header and verify header */
7205  or_mvcc_get_header (&chn_recdes, &mvcc_header);
7206  if (scan == S_SUCCESS && mvcc_snapshot != NULL && mvcc_snapshot->snapshot_fnc != NULL)
7207  {
7208  if (mvcc_snapshot->snapshot_fnc (thread_p, &mvcc_header, mvcc_snapshot) == TOO_OLD_FOR_SNAPSHOT)
7209  {
7210  /* consider snapshot is not satisified only in case of TOO_OLD_FOR_SNAPSHOT;
7211  * TOO_NEW_FOR_SNAPSHOT records should be accepted, e.g. a recently updated record, locked at select */
7212  return S_SNAPSHOT_NOT_SATISFIED;
7213  }
7214  }
7215  if (MVCC_IS_CHN_UPTODATE (&mvcc_header, chn))
7216  {
7217  /* Test chn if MVCC is disabled for record or if delete MVCCID is invalid and the record is inserted by
7218  * current transaction. */
7219  /* When testing chn is not required, the result is considered up-to-date. */
7220  scan = S_SUCCESS_CHN_UPTODATE;
7221  }
7222 
7223  if (scan != S_SUCCESS_CHN_UPTODATE)
7224  {
7225  /*
7226  * Note that we could copy the recdes.data from chn_recdes.data, but
7227  * I don't think it is much difference here, and we will have to deal
7228  * with all not fit conditions and so on, so we decide to use
7229  * spage_get_record instead.
7230  */
7231  scan = spage_get_record (thread_p, pgptr, slotid, recdes, COPY);
7232  }
7233  }
7234 
7235  return scan;
7236 }
7237 
7238 /*
7239  * heap_prepare_get_context () - Prepare for obtaining/processing heap object.
7240  * It may get class_oid, record_type, home page
7241  * and also forward_oid and forward_page in some
7242  * cases.
7243  *
7244  * return : SCAN_CODE: S_ERROR, S_DOESNT_EXIST and S_SUCCESS.
7245  * thread_p (in) : Thread entry.
7246  * context (in/out) : Heap get context used to store the information required for heap objects processing.
7247  * is_heap_scan (in) : Used to decide if it is acceptable to reach deleted objects or not.
7248  * non_ex_handling_type (in): Handling type for deleted objects
7249  * - LOG_ERROR_IF_DELETED: write the
7250  * ER_HEAP_UNKNOWN_OBJECT error to log
7251  * - LOG_WARNING_IF_DELETED: set only warning
7252  *
7253  * Note : the caller should manage the page unfix of both home and forward
7254  * pages (even in case of error, there may be pages latched).
7255  * The functions uses a multiple page latch; in some extreme cases,
7256  * if the home page was unfixed during fwd page fix, we need to recheck
7257  * the home page OID is still valid and re-PEEK the home record. We
7258  * allow this to repeat once.
7259  * For performance:
7260  * Make sure page unfix is performed in order fwd page, then home page.
7261  * Normal fix sequence (first attempt) is home page, then fwd page; if
7262  * the fwd page is unfixed before home, another thread will attempt to
7263  * fix fwd page, after having home fix; first try (CONDITIONAL) will
7264  * fail, and will trigger an ordered fix + UNCONDITIONAL.
7265  */
SCAN_CODE
heap_prepare_get_context (THREAD_ENTRY * thread_p, HEAP_GET_CONTEXT * context, bool is_heap_scan,
			  NON_EXISTENT_HANDLING non_ex_handling_type)
{
  /* NOTE(review): in this copy of the file several er_set () calls, one case
   * label and one if-condition are missing their opening line; only their
   * continuation lines remain.  Verify the error-reporting details against
   * the upstream source. */
  SPAGE_SLOT *slot_p = NULL;
  RECDES peek_recdes;
  SCAN_CODE scan = S_SUCCESS;
  int try_count = 0;		/* retries after the home page was transiently unfixed */
  int try_max = 1;
  int ret;
#if defined (SA_MODE)
  bool is_system_class = false;
#endif /* SA_MODE */

  assert (context->oid_p != NULL);

try_again:

  /* First make sure object home_page is fixed. */
  ret = heap_prepare_object_page (thread_p, context->oid_p, &context->home_page_watcher, context->latch_mode);
  if (ret != NO_ERROR)
    {
      if (ret == ER_HEAP_UNKNOWN_OBJECT)
	{
	  /* bad page id, consider the object does not exist and let the caller handle the case */
	  return S_DOESNT_EXIST;
	}

      goto error;
    }

  /* Output class_oid if necessary. */
  if (context->class_oid_p != NULL && OID_ISNULL (context->class_oid_p)
      && heap_get_class_oid_from_page (thread_p, context->home_page_watcher.pgptr, context->class_oid_p) != NO_ERROR)
    {
      /* Unexpected. */
      assert_release (false);
      goto error;
    }

  /* Get slot. */
  slot_p = spage_get_slot (context->home_page_watcher.pgptr, context->oid_p->slotid);
  if (slot_p == NULL)
    {
      /* Slot doesn't exist. */
      if (!is_heap_scan)
	{
	  /* Do not set error for heap scan and get record info. */
	  /* NOTE(review): the er_set () opening line is missing here. */
	  context->oid_p->pageid, context->oid_p->slotid);
	}

      /* Output record type as REC_UNKNOWN. */
      context->record_type = REC_UNKNOWN;

      return S_DOESNT_EXIST;
    }

  /* Output record type. */
  context->record_type = slot_p->record_type;

  if (context->fwd_page_watcher.pgptr != NULL && slot_p->record_type != REC_RELOCATION
      && slot_p->record_type != REC_BIGONE)
    {
      /* Forward page no longer required. */
      pgbuf_ordered_unfix (thread_p, &context->fwd_page_watcher);
    }

  /* Fix required pages. */
  switch (slot_p->record_type)
    {
    case REC_RELOCATION:
      /* Need to get forward_oid and fix forward page */
      scan = spage_get_record (thread_p, context->home_page_watcher.pgptr, context->oid_p->slotid, &peek_recdes, PEEK);
      if (scan != S_SUCCESS)
	{
	  /* Unexpected. */
	  assert_release (false);
	  goto error;
	}
      /* Output forward_oid. */
      COPY_OID (&context->forward_oid, (OID *) peek_recdes.data);

      /* Try to latch forward_page. */
      /* NOTE(review): one line appears to be missing here in this copy. */
      ret = heap_prepare_object_page (thread_p, &context->forward_oid, &context->fwd_page_watcher, context->latch_mode);
      if (ret == NO_ERROR)
	{
	  /* Pages successfully fixed. */
	  if (context->home_page_watcher.page_was_unfixed)
	    {
	      /* Home_page/forward_page are both fixed. However, since home page was unfixed, record may have changed
	       * (record type has changed or just the relocation link). Go back and repeat steps (if nothing was
	       * changed, pages are already fixed). */
	      if (try_count++ < try_max)
		{
		  context->home_page_watcher.page_was_unfixed = false;
		  goto try_again;
		}
	      else
		{
		  /* NOTE(review): the er_set () opening line is missing here. */
		  context->forward_oid.pageid);
		}

	      goto error;
	    }
	  return S_SUCCESS;
	}

      goto error;

    case REC_BIGONE:
      /* Need to get forward_oid and forward_page (first overflow page). */
      scan = spage_get_record (thread_p, context->home_page_watcher.pgptr, context->oid_p->slotid, &peek_recdes, PEEK);
      if (scan != S_SUCCESS)
	{
	  /* Unexpected. */
	  assert_release (false);
	  goto error;
	}
      /* Output forward_oid. */
      COPY_OID (&context->forward_oid, (OID *) peek_recdes.data);

      /* Fix overflow page. Since overflow pages should be always accessed with their home pages latched, unconditional
       * latch should work; However, we need to use the same ordered_fix approach. */
      /* NOTE(review): two lines appear to be missing here in this copy. */
      ret = heap_prepare_object_page (thread_p, &context->forward_oid, &context->fwd_page_watcher, context->latch_mode);
      if (ret == NO_ERROR)
	{
	  /* Pages successfully fixed. */
	  if (context->home_page_watcher.page_was_unfixed)
	    {
	      /* This is not expected. */
	      assert (false);
	      goto error;
	    }
	  return S_SUCCESS;
	}

      goto error;

    case REC_ASSIGN_ADDRESS:
      /* Object without content.. only the address has been assigned */
      if (is_heap_scan)
	{
	  /* Just ignore record. */
	  return S_DOESNT_EXIST;
	}
      if (spage_check_slot_owner (thread_p, context->home_page_watcher.pgptr, context->oid_p->slotid))
	{
	  /* NOTE(review): the er_set () opening line is missing here. */
	  context->oid_p->pageid, context->oid_p->slotid);
	  return S_DOESNT_EXIST;
	}
      else
	{
	  /* NOTE(review): the er_set () opening line is missing here. */
	  context->oid_p->pageid, context->oid_p->slotid);
	  goto error;
	}

    case REC_HOME:
      /* Only home page is needed. */
      return S_SUCCESS;

    /* NOTE(review): a case label appears to be missing here in this copy. */
    case REC_MARKDELETED:
      /* Vacuumed/deleted record. */
      if (is_heap_scan)
	{
	  /* Just ignore record. */
	  return S_DOESNT_EXIST;
	}
#if defined(SA_MODE)
      /* Accessing a REC_MARKDELETED record from a system class can happen in SA mode, when no MVCC operations have
       * been performed on the system class. */
      if (oid_is_system_class (context->class_oid_p, &is_system_class) != NO_ERROR)
	{
	  goto error;
	}
      if (is_system_class == true)
	{
	  /* NOTE(review): the er_set () opening line is missing here. */
	  context->oid_p->pageid, context->oid_p->slotid);
	  return S_DOESNT_EXIST;
	}
#endif /* SA_MODE */

      /* NOTE(review): the first line of this condition is missing here. */
	  || non_ex_handling_type == LOG_WARNING_IF_DELETED)
	{
	  /* A deleted class record, corresponding to a deleted class can be accessed through catalog update operations
	   * on another class. This is possible if a class has an attribute holding a domain that references the
	   * dropped class. Another situation is the client request for authentication, which fetches the object (an
	   * instance of db_user) using dirty version. If it has been removed, it will be found as a deleted record. */
	  /* NOTE(review): the er_set () opening line is missing here. */
	  context->oid_p->pageid, context->oid_p->slotid);
	}
      else
	{
	  /* NOTE(review): the er_set () opening line is missing here. */
	  context->oid_p->pageid, context->oid_p->slotid);
	}
      return S_DOESNT_EXIST;

    case REC_NEWHOME:
      if (is_heap_scan)
	{
	  /* Just ignore record. */
	  return S_DOESNT_EXIST;
	}
      /* REC_NEWHOME are only allowed to be accessed through REC_RELOCATION slots. */
      /* Fall through to error. */
    default:
      /* Unexpected case. */
      /* NOTE(review): the er_set () opening line is missing here. */
      context->oid_p->pageid, context->oid_p->slotid);
      goto error;
    }

  /* Impossible */
  assert_release (false);
error:
  assert (ret == ER_LK_PAGE_TIMEOUT || er_errid () != NO_ERROR);

  heap_clean_get_context (thread_p, context);
  return S_ERROR;
}
7496 
7497 /*
7498  * heap_get_mvcc_header () - Get record MVCC header.
7499  *
7500  * return : SCAN_CODE: S_SUCCESS, S_ERROR or S_DOESNT_EXIST.
7501  * thread_p (in) : Thread entry.
7502  * context (in) : Heap get context.
7503  * mvcc_header (out) : Record MVCC header.
7504  *
7505  * NOTE: This function gets MVCC header, if it has everything needed already
7506  * obtained: pages latched, forward OID (if the case), record type.
7507  */
7508 SCAN_CODE
7510 {
7511  RECDES peek_recdes;
7512  SCAN_CODE scan_code;
7513  PAGE_PTR home_page, forward_page;
7514  const OID *oid;
7515 
7516  assert (context != NULL && context->oid_p != NULL);
7517 
7518  oid = context->oid_p;
7519  home_page = context->home_page_watcher.pgptr;
7520  forward_page = context->fwd_page_watcher.pgptr;
7521 
7522  assert (home_page != NULL);
7523  assert (pgbuf_get_page_id (home_page) == oid->pageid && pgbuf_get_volume_id (home_page) == oid->volid);
7524  assert (context->record_type == REC_HOME || context->record_type == REC_RELOCATION
7525  || context->record_type == REC_BIGONE);
7526  assert (context->record_type == REC_HOME
7527  || (forward_page != NULL && pgbuf_get_page_id (forward_page) == context->forward_oid.pageid
7528  && pgbuf_get_volume_id (forward_page) == context->forward_oid.volid));
7529  assert (mvcc_header != NULL);
7530 
7531  /* Get header and verify snapshot. */
7532  switch (context->record_type)
7533  {
7534  case REC_HOME:
7535  scan_code = spage_get_record (thread_p, home_page, oid->slotid, &peek_recdes, PEEK);
7536  if (scan_code != S_SUCCESS)
7537  {
7538  /* Unexpected. */
7539  assert (false);
7540  return S_ERROR;
7541  }
7542  if (or_mvcc_get_header (&peek_recdes, mvcc_header) != NO_ERROR)
7543  {
7544  /* Unexpected. */
7545  assert (false);
7546  return S_ERROR;
7547  }
7548  return S_SUCCESS;
7549  case REC_BIGONE:
7550  assert (forward_page != NULL);
7551  if (heap_get_mvcc_rec_header_from_overflow (forward_page, mvcc_header, &peek_recdes) != NO_ERROR)
7552  {
7553  /* Unexpected. */
7554  assert (false);
7555  return S_ERROR;
7556  }
7557  return S_SUCCESS;
7558  case REC_RELOCATION:
7559  assert (forward_page != NULL);
7560  scan_code = spage_get_record (thread_p, forward_page, context->forward_oid.slotid, &peek_recdes, PEEK);
7561  if (scan_code != S_SUCCESS)
7562  {
7563  /* Unexpected. */
7564  assert (false);
7565  return S_ERROR;
7566  }
7567  if (or_mvcc_get_header (&peek_recdes, mvcc_header) != NO_ERROR)
7568  {
7569  /* Unexpected. */
7570  assert (false);
7571  return S_ERROR;
7572  }
7573  return S_SUCCESS;
7574  default:
7575  /* Unexpected. */
7576  assert (false);
7577  return S_ERROR;
7578  }
7579 
7580  /* Impossible. */
7581  assert (false);
7582  return S_ERROR;
7583 }
7584 
7585 /*
7586  * heap_get_record_data_when_all_ready () - Get record data when all required information is known. This can work only
7587  * for record types that actually have data: REC_HOME, REC_RELOCATION and
7588  * REC_BIGONE. Required information: home_page, forward_oid and forward page
7589  * for REC_RELOCATION and REC_BIGONE, and record type.
7590  *
7591  * return : SCAN_CODE: S_SUCCESS, S_ERROR, S_DOESNT_FIT.
7592  * thread_p (in) : Thread entry.
7593  * context (in/out) : Heap get context. Should contain all required information for object retrieving
7594  */
7595 SCAN_CODE
7597 {
7598  HEAP_SCANCACHE *scan_cache_p = context->scan_cache;
7599 
7600  /* We have everything set up to get record data. */
7601  assert (context != NULL);
7602 
7603  /* Assert ispeeking, scan_cache and recdes are compatible. If ispeeking is PEEK, it is the caller responsabilty to
7604  * keep the page latched while the recdes don't go out of scope. If ispeeking is COPY, we must have a preallocated
7605  * area to copy to. This means either scan_cache is not NULL (and scan_cache->area can be used) or recdes->data is
7606  * not NULL (and recdes->area_size defines how much can be copied). */
7607  assert ((context->ispeeking == PEEK)
7608  || (context->ispeeking == COPY && (scan_cache_p != NULL || context->recdes_p->data != NULL)));
7609 
7610  switch (context->record_type)
7611  {
7612  case REC_RELOCATION:
7613  /* Don't peek REC_RELOCATION. */
7614  if (scan_cache_p != NULL && (context->ispeeking != 0 || context->recdes_p->data == NULL)
7615  && heap_scan_cache_allocate_recdes_data (thread_p, scan_cache_p, context->recdes_p,
7616  DB_PAGESIZE * 2) != NO_ERROR)
7617  {
7618  ASSERT_ERROR ();
7619  return S_ERROR;
7620  }
7621 
7622  return spage_get_record (thread_p, context->fwd_page_watcher.pgptr, context->forward_oid.slotid,
7623  context->recdes_p, COPY);
7624  case REC_BIGONE:
7625  return heap_get_bigone_content (thread_p, scan_cache_p, context->ispeeking, &context->forward_oid,
7626  context->recdes_p);
7627  case REC_HOME:
7628  if (scan_cache_p != NULL && context->ispeeking == COPY && context->recdes_p->data == NULL
7629  && heap_scan_cache_allocate_recdes_data (thread_p, scan_cache_p, context->recdes_p,
7630  DB_PAGESIZE * 2) != NO_ERROR)
7631  {
7632  ASSERT_ERROR ();
7633  return S_ERROR;
7634  }
7635  return spage_get_record (thread_p, context->home_page_watcher.pgptr, context->oid_p->slotid, context->recdes_p,
7636  context->ispeeking);
7637  default:
7638  break;
7639  }
7640  /* Shouldn't be here. */
7641  return S_ERROR;
7642 }
7643 
7644 /*
7645  * heap_next_internal () - Retrieve of peek next object.
7646  *
7647  * return : SCAN_CODE (Either of S_SUCCESS, S_DOESNT_FIT,
7648  * S_END, S_ERROR).
7649  * thread_p (in) : Thread entry.
7650  * hfid (in) : Heap file identifier.
7651  * class_oid (in) : Class object identifier.
7652  * next_oid (in/out) : Object identifier of current record. Will be
7653  * set to next available record or NULL_OID
7654  * when there is not one.
7655  * recdes (in) : Pointer to a record descriptor. Will be
7656  * modified to describe the new record.
7657  * scan_cache (in) : Scan cache or NULL
7658  * ispeeking (in) : PEEK when the object is peeked scan_cache can't
7659  * be NULL COPY when the object is copied.
7660  * cache_recordinfo (in/out) : DB_VALUE pointer array that caches record
7661  * information values.
7662  */
static SCAN_CODE
heap_next_internal (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * next_oid, RECDES * recdes,
		    HEAP_SCANCACHE * scan_cache, bool ispeeking, bool reversed_direction, DB_VALUE ** cache_recordinfo)
{
  /* NOTE(review): in this copy of the file several statements are missing
   * their opening line (er_set/er_log_debug calls, the page-fetch call and a
   * few if-conditions); only continuation lines remain.  Verify against the
   * upstream source. */
  VPID vpid;
  VPID *vpidptr_incache;
  INT16 type = REC_UNKNOWN;
  OID oid;
  RECDES forward_recdes;
  SCAN_CODE scan = S_ERROR;
  int get_rec_info = cache_recordinfo != NULL;	/* record-info mode visits every slot */
  bool is_null_recdata;
  PGBUF_WATCHER curr_page_watcher;
  PGBUF_WATCHER old_page_watcher;

  assert (scan_cache != NULL);

#if defined(CUBRID_DEBUG)
  if (scan_cache != NULL && scan_cache->debug_initpattern != HEAP_DEBUG_SCANCACHE_INITPATTERN)
    {
      er_log_debug (ARG_FILE_LINE, "heap_next: Your scancache is not initialized");
      /* NOTE(review): a line appears to be missing here in this copy. */
      return S_ERROR;
    }
  if (scan_cache != NULL && HFID_IS_NULL (&scan_cache->hfid))
    {
      /* NOTE(review): the er_log_debug () opening line is missing here. */
		    "heap_next: scan_cache without heap.. heap file must be given to heap_scancache_start () when"
		    " scan_cache is used with heap_first, heap_next, heap_prev heap_last");
      /* NOTE(review): a line appears to be missing here in this copy. */
      return S_ERROR;
    }
#endif /* CUBRID_DEBUG */


  /* The scancache is authoritative for the heap file and, when set, for the
   * class identifier. */
  hfid = &scan_cache->node.hfid;
  if (!OID_ISNULL (&scan_cache->node.class_oid))
    {
      class_oid = &scan_cache->node.class_oid;
    }

  PGBUF_INIT_WATCHER (&curr_page_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);
  PGBUF_INIT_WATCHER (&old_page_watcher, PGBUF_ORDERED_HEAP_NORMAL, hfid);

  if (OID_ISNULL (next_oid))
    {
      if (reversed_direction)
	{
	  /* Retrieve the last record of the file. */
	  if (heap_get_last_vpid (thread_p, hfid, &vpid) != NO_ERROR)
	    {
	      ASSERT_ERROR ();
	      return S_ERROR;
	    }
	  oid.volid = vpid.volid;
	  oid.pageid = vpid.pageid;
	  oid.slotid = NULL_SLOTID;
	}
      else
	{
	  /* Retrieve the first object of the heap */
	  oid.volid = hfid->vfid.volid;
	  oid.pageid = hfid->hpgid;
	  oid.slotid = 0;	/* i.e., will get slot 1 */
	}
    }
  else
    {
      oid = *next_oid;
    }


  is_null_recdata = (recdes->data == NULL);

  /* Start looking for next object */
  while (true)
    {
      /* Start looking for next object in current page. If we reach the end of this page without finding a new object,
       * fetch next page and continue looking there. If no objects are found, end scanning */
      while (true)
	{
	  vpid.volid = oid.volid;
	  vpid.pageid = oid.pageid;

	  /*
	   * Fetch the page where the object of OID is stored. Use previous
	   * scan page whenever possible, otherwise, deallocate the page.
	   */
	  if (scan_cache->cache_last_fix_page == true && scan_cache->page_watcher.pgptr != NULL)
	    {
	      vpidptr_incache = pgbuf_get_vpid_ptr (scan_cache->page_watcher.pgptr);
	      if (VPID_EQ (&vpid, vpidptr_incache))
		{
		  /* replace with local watcher, scan cache watcher will be changed by called functions */
		  pgbuf_replace_watcher (thread_p, &scan_cache->page_watcher, &curr_page_watcher);
		}
	      else
		{
		  /* Keep previous scan page fixed until we fixed the current one */
		  pgbuf_replace_watcher (thread_p, &scan_cache->page_watcher, &old_page_watcher);
		}
	    }
	  if (curr_page_watcher.pgptr == NULL)
	    {
	      curr_page_watcher.pgptr =
		/* NOTE(review): the opening line of this page-fetch call is missing here. */
		&curr_page_watcher);
	      if (old_page_watcher.pgptr != NULL)
		{
		  pgbuf_ordered_unfix (thread_p, &old_page_watcher);
		}
	      if (curr_page_watcher.pgptr == NULL)
		{
		  if (er_errid () == ER_PB_BAD_PAGEID)
		    {
		      /* NOTE(review): the er_set () opening line is missing here. */
		      oid.slotid);
		    }

		  /* something went wrong, return */
		  assert (scan_cache->page_watcher.pgptr == NULL);
		  return S_ERROR;
		}
	    }

	  if (get_rec_info)
	    {
	      /* Getting record information means that we need to scan all slots even if they store no object. */
	      if (reversed_direction)
		{
		  scan =
		    spage_previous_record_dont_skip_empty (curr_page_watcher.pgptr, &oid.slotid, &forward_recdes, PEEK);
		}
	      else
		{
		  scan =
		    spage_next_record_dont_skip_empty (curr_page_watcher.pgptr, &oid.slotid, &forward_recdes, PEEK);
		}
	      /* NOTE(review): the condition of this block (header-slot check) is missing here. */
		{
		  /* skip the header */
		  scan =
		    spage_next_record_dont_skip_empty (curr_page_watcher.pgptr, &oid.slotid, &forward_recdes, PEEK);
		}
	    }
	  else
	    {
	      /* Find the next object. Skip relocated records (i.e., new_home records). This records must be accessed
	       * through the relocation record (i.e., the object). */

	      while (true)
		{
		  if (reversed_direction)
		    {
		      scan = spage_previous_record (curr_page_watcher.pgptr, &oid.slotid, &forward_recdes, PEEK);
		    }
		  else
		    {
		      scan = spage_next_record (curr_page_watcher.pgptr, &oid.slotid, &forward_recdes, PEEK);
		    }
		  if (scan != S_SUCCESS)
		    {
		      /* stop */
		      break;
		    }
		  /* NOTE(review): the condition of this block (header-slot check) is missing here. */
		    {
		      /* skip the header */
		      continue;
		    }
		  type = spage_get_record_type (curr_page_watcher.pgptr, oid.slotid);
		  /* NOTE(review): the condition of this block (record-type filter) is missing here. */
		    {
		      /* skip */
		      continue;
		    }

		  break;
		}
	    }

	  if (scan != S_SUCCESS)
	    {
	      if (scan == S_END)
		{
		  /* Find next page of heap and continue scanning */
		  if (reversed_direction)
		    {
		      (void) heap_vpid_prev (thread_p, hfid, curr_page_watcher.pgptr, &vpid);
		    }
		  else
		    {
		      (void) heap_vpid_next (thread_p, hfid, curr_page_watcher.pgptr, &vpid);
		    }
		  pgbuf_replace_watcher (thread_p, &curr_page_watcher, &old_page_watcher);
		  oid.volid = vpid.volid;
		  oid.pageid = vpid.pageid;
		  oid.slotid = -1;
		  if (oid.pageid == NULL_PAGEID)
		    {
		      /* must be last page, end scanning */
		      OID_SET_NULL (next_oid);
		      if (old_page_watcher.pgptr != NULL)
			{
			  pgbuf_ordered_unfix (thread_p, &old_page_watcher);
			}
		      return scan;
		    }
		}
	      else
		{
		  /* Error, stop scanning */
		  if (old_page_watcher.pgptr != NULL)
		    {
		      pgbuf_ordered_unfix (thread_p, &old_page_watcher);
		    }
		  pgbuf_ordered_unfix (thread_p, &curr_page_watcher);
		  return scan;
		}
	    }
	  else
	    {
	      /* found a new object */
	      break;
	    }
	}

      /* A record was found */
      if (get_rec_info)
	{
	  scan =
	    heap_get_record_info (thread_p, oid, recdes, forward_recdes, &curr_page_watcher, scan_cache, ispeeking,
				  cache_recordinfo);
	}
      else
	{
	  int cache_last_fix_page_save = scan_cache->cache_last_fix_page;

	  /* Force page caching so the visible-version lookup can reuse the
	   * page we already have fixed; restore the caller's setting after. */
	  scan_cache->cache_last_fix_page = true;
	  pgbuf_replace_watcher (thread_p, &curr_page_watcher, &scan_cache->page_watcher);

	  scan = heap_scan_get_visible_version (thread_p, &oid, class_oid, recdes, scan_cache, ispeeking, NULL_CHN);
	  scan_cache->cache_last_fix_page = cache_last_fix_page_save;

	  if (!cache_last_fix_page_save && scan_cache->page_watcher.pgptr)
	    {
	      /* restore into curr_page_watcher and unfix later */
	      pgbuf_replace_watcher (thread_p, &scan_cache->page_watcher, &curr_page_watcher);
	    }
	}

      if (scan == S_SUCCESS)
	{
	  /*
	   * Make sure that the found object is an instance of the desired
	   * class. If it isn't then continue looking.
	   */
	  if (class_oid == NULL || OID_ISNULL (class_oid) || !OID_IS_ROOTOID (&oid))
	    {
	      /* stop */
	      *next_oid = oid;
	      break;
	    }
	  else
	    {
	      /* continue looking */
	      if (is_null_recdata)
		{
		  /* reset recdes->data before getting next record */
		  recdes->data = NULL;
		}
	      continue;
	    }
	}
      else if (scan == S_SNAPSHOT_NOT_SATISFIED || scan == S_DOESNT_EXIST)
	{
	  /* the record does not satisfy the snapshot or was deleted - continue */
	  if (is_null_recdata)
	    {
	      /* reset recdes->data before getting next record */
	      recdes->data = NULL;
	    }
	  continue;
	}

      /* scan was not successful, stop scanning */
      break;
    }

  if (old_page_watcher.pgptr != NULL)
    {
      pgbuf_ordered_unfix (thread_p, &old_page_watcher);
    }

  if (curr_page_watcher.pgptr != NULL)
    {
      if (!scan_cache->cache_last_fix_page)
	{
	  pgbuf_ordered_unfix (thread_p, &curr_page_watcher);
	}
      else
	{
	  pgbuf_replace_watcher (thread_p, &curr_page_watcher, &scan_cache->page_watcher);
	}
    }

  return scan;
}
7971 
7972 /*
7973  * heap_first () - Retrieve or peek first object of heap
7974  * return: SCAN_CODE (Either of S_SUCCESS, S_DOESNT_FIT, S_END, S_ERROR)
7975  * hfid(in):
7976  * class_oid(in):
7977  * oid(in/out): Object identifier of current record.
7978  * Will be set to first available record or NULL_OID when there
7979  * is not one.
7980  * recdes(in/out): Pointer to a record descriptor. Will be modified to
7981  * describe the new record.
7982  * scan_cache(in/out): Scan cache or NULL
7983  * ispeeking(in): PEEK when the object is peeked, scan_cache cannot be NULL
7984  * COPY when the object is copied
7985  *
7986  */
7987 SCAN_CODE
7988 heap_first (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * oid, RECDES * recdes,
7989  HEAP_SCANCACHE * scan_cache, int ispeeking)
7990 {
7991  /* Retrieve the first record of the file */
7992  OID_SET_NULL (oid);
7993  oid->volid = hfid->vfid.volid;
7994 
7995  return heap_next (thread_p, hfid, class_oid, oid, recdes, scan_cache, ispeeking);
7996 }
7997 
7998 /*
7999  * heap_last () - Retrieve or peek last object of heap
8000  * return: SCAN_CODE
8001  * (Either of S_SUCCESS, S_DOESNT_FIT, S_END,
8002  * S_ERROR)
8003  * hfid(in):
8004  * class_oid(in):
8005  * oid(in/out): Object identifier of current record.
8006  * Will be set to last available record or NULL_OID when there is
8007  * not one.
8008  * recdes(in/out): Pointer to a record descriptor. Will be modified to
8009  * describe the new record.
8010  * scan_cache(in/out): Scan cache or NULL
8011  * ispeeking(in): PEEK when the object is peeked, scan_cache cannot be NULL
8012  * COPY when the object is copied
8013  *
8014  */
SCAN_CODE
heap_last (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * oid, RECDES * recdes,
	   HEAP_SCANCACHE * scan_cache, int ispeeking)
{
  /* Retrieve the last record of the file: start from a NULL OID on the heap's
   * volume and let heap_prev walk backwards to the last available record. */
  OID_SET_NULL (oid);
  oid->volid = hfid->vfid.volid;

  return heap_prev (thread_p, hfid, class_oid, oid, recdes, scan_cache, ispeeking);
}
8025 
8026 #if defined (ENABLE_UNUSED_FUNCTION)
8027 /*
8028  * heap_cmp () - Compare heap object with current content
8029  * return: int (> 0 recdes is larger,
8030  * < 0 recdes is smaller, and
8031  * = 0 same)
8032  * oid(in): The object to compare
8033  * recdes(in): Compare object against this content
8034  *
8035  * Note: Compare the heap object against given content in ASCII format.
8036  */
8037 int
8038 heap_cmp (THREAD_ENTRY * thread_p, const OID * oid, RECDES * recdes)
8039 {
8040  HEAP_SCANCACHE scan_cache;
8041  RECDES peek_recdes;
8042  int compare;
8043 
8044  heap_scancache_quick_start (&scan_cache);
8045  if (heap_get (thread_p, oid, &peek_recdes, &scan_cache, PEEK, NULL_CHN) != S_SUCCESS)
8046  {
8047  compare = 1;
8048  }
8049  else if (recdes->length > peek_recdes.length)
8050  {
8051  compare = memcmp (recdes->data, peek_recdes.data, peek_recdes.length);
8052  if (compare == 0)
8053  {
8054  compare = 1;
8055  }
8056  }
8057  else
8058  {
8059  compare = memcmp (recdes->data, peek_recdes.data, recdes->length);
8060  if (compare == 0 && recdes->length != peek_recdes.length)
8061  {
8062  compare = -1;
8063  }
8064  }
8065 
8066  heap_scancache_end (thread_p, &scan_cache);
8067 
8068  return compare;
8069 }
8070 #endif /* ENABLE_UNUSED_FUNCTION */
8071 
8072 /*
8073  * heap_scanrange_start () - Initialize a scanrange cursor
8074  * return: NO_ERROR
8075  * scan_range(in/out): Scan range
8076  * hfid(in): Heap file identifier
8077  * class_oid(in): Class identifier
8078  * For any class, NULL or NULL_OID can be given
8079  *
8080  * Note: A scanrange structure is initialized. The scanrange structure
8081  * is used to define a scan range (set of objects) and to cache
8082  * information about the latest fetched page and memory allocated
8083  * by the scan functions. This information is used in future
8084  * scans, for example, to avoid hashing for the same page in the
8085  * page buffer pool or defining another allocation area.
8086  * The caller is responsible for declaring the end of a scan
8087  * range so that the fixed pages and allocated memory are freed.
8088  * Using many scans at the same time should be avoided since page
8089  * buffers are fixed and locked for future references and there
8090  * is a limit of buffers in the page buffer pool. This is
8091  * analogous to fetching many pages at the same time.
8092  */
8093 int
8094 heap_scanrange_start (THREAD_ENTRY * thread_p, HEAP_SCANRANGE * scan_range, const HFID * hfid, const OID * class_oid,
8096 {
8097  int ret = NO_ERROR;
8098 
8099  /* Start the scan cache */
8100  ret = heap_scancache_start (thread_p, &scan_range->scan_cache, hfid, class_oid, true, false, mvcc_snapshot);
8101  if (ret != NO_ERROR)
8102  {
8103  goto exit_on_error;
8104  }
8105 
8106  OID_SET_NULL (&scan_range->first_oid);
8107  scan_range->first_oid.volid = hfid->vfid.volid;
8108  scan_range->last_oid = scan_range->first_oid;
8109 
8110  return ret;
8111 
8112 exit_on_error:
8113 
8114  OID_SET_NULL (&scan_range->first_oid);
8115  OID_SET_NULL (&scan_range->last_oid);
8116 
8117  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
8118 }
8119 
8120 /*
8121  * heap_scanrange_end () - End of a scanrange
8122  * return:
8123  * scan_range(in/out): Scanrange
8124  *
8125  * Note: Any fixed heap page on the given scan is freed and any memory
8126  * allocated by this scan is also freed. The scan_range structure is undefined.
8127  */
8128 void
8130 {
8131  /* Finish the scan cache */
8132  heap_scancache_end (thread_p, &scan_range->scan_cache);
8133  OID_SET_NULL (&scan_range->first_oid);
8134  OID_SET_NULL (&scan_range->last_oid);
8135 }
8136 
8137 /*
8138  * heap_scanrange_to_following () - Define the following scanrange
8139  * return: SCAN_CODE
8140  * (Either of S_SUCCESS, S_END, S_ERROR)
8141  * scan_range(in/out): Scanrange
8142  * start_oid(in): Desired OID for first element in the scanrange or NULL
8143  *
8144  * Note: The range of a scanrange is defined. The scanrange is defined
8145  * as follows:
8146  * a: When start_oid == NULL, the first scanrange object is the
8147  * next object after the last object in the previous scanrange
8148  * b: When start_oid is the same as a NULL_OID, the first object
8149  * is the first heap object.
8150  * c: The first object in the scanrange is the given object.
8151  * The last object in the scanrange is either the first object in
8152  * the scanrange or the one after the first object which is not a
8153  * relocated or multipage object.
8154  */
SCAN_CODE
heap_scanrange_to_following (THREAD_ENTRY * thread_p, HEAP_SCANRANGE * scan_range, OID * start_oid)
{
  SCAN_CODE scan;
  RECDES recdes;		/* peeked record; its data is never copied out of the page */
  INT16 slotid;
  VPID *vpid;

  if (HEAP_DEBUG_ISVALID_SCANRANGE (scan_range) != DISK_VALID)
    {
      return S_ERROR;
    }

  if (start_oid != NULL)
    {
      if (OID_ISNULL (start_oid))
	{
	  /* Scanrange starts at first heap object */
	  scan =
	    heap_first (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid,
			&scan_range->first_oid, &recdes, &scan_range->scan_cache, PEEK);
	  if (scan != S_SUCCESS)
	    {
	      return scan;
	    }
	}
      else
	{
	  /* Scanrange starts with the given object */
	  scan_range->first_oid = *start_oid;
	  /* NOTE(review): the fetch below targets &scan_range->last_oid (still the end
	   * of the PREVIOUS range) rather than the first_oid just assigned -- confirm
	   * this is intentional and not a typo for &scan_range->first_oid. */
	  scan = heap_get_visible_version (thread_p, &scan_range->last_oid, &scan_range->scan_cache.node.class_oid,
					   &recdes, &scan_range->scan_cache, PEEK, NULL_CHN);
	  if (scan != S_SUCCESS)
	    {
	      if (scan == S_DOESNT_EXIST || scan == S_SNAPSHOT_NOT_SATISFIED)
		{
		  /* Start object is deleted or invisible: advance to the next visible one. */
		  scan =
		    heap_next (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid,
			       &scan_range->first_oid, &recdes, &scan_range->scan_cache, PEEK);
		  if (scan != S_SUCCESS)
		    {
		      return scan;
		    }
		}
	      else
		{
		  /* Any other failure aborts the range definition. */
		  return scan;
		}
	    }
	}
    }
  else
    {
      /*
       * Scanrange ends with the prior object after the first object in the
       * the previous scanrange
       */
      scan_range->first_oid = scan_range->last_oid;
      scan =
	heap_next (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid,
		   &scan_range->first_oid, &recdes, &scan_range->scan_cache, PEEK);
      if (scan != S_SUCCESS)
	{
	  return scan;
	}
    }


  scan_range->last_oid = scan_range->first_oid;
  /* Extend the range forward over consecutive REC_HOME slots that live on the
   * same page (only if that page is still fixed by the scan cache's watcher);
   * the range stops before the first relocated/overflow record. */
  if (scan_range->scan_cache.page_watcher.pgptr != NULL
      && (vpid = pgbuf_get_vpid_ptr (scan_range->scan_cache.page_watcher.pgptr)) != NULL
      && (vpid->pageid == scan_range->last_oid.pageid) && (vpid->volid == scan_range->last_oid.volid)
      && spage_get_record_type (scan_range->scan_cache.page_watcher.pgptr, scan_range->last_oid.slotid) == REC_HOME)
    {
      slotid = scan_range->last_oid.slotid;
      while (true)
	{
	  if (spage_next_record (scan_range->scan_cache.page_watcher.pgptr, &slotid, &recdes, PEEK) != S_SUCCESS
	      || spage_get_record_type (scan_range->scan_cache.page_watcher.pgptr, slotid) != REC_HOME)
	    {
	      break;
	    }
	  else
	    {
	      scan_range->last_oid.slotid = slotid;
	    }
	}
    }

  return scan;
}
8246 
8247 /*
8248  * heap_scanrange_to_prior () - Define the prior scanrange
8249  * return: SCAN_CODE
8250  * (Either of S_SUCCESS, S_END, S_ERROR)
8251  * scan_range(in/out): Scanrange
8252  * last_oid(in): Desired OID for first element in the scanrange or NULL
8253  *
8254  * Note: The range of a scanrange is defined. The scanrange is defined
8255  * as follows:
8256  * a: When last_oid == NULL, the last scanrange object is the
8257  * prior object after the first object in the previous
8258  * scanrange.
8259  * b: When last_oid is the same as a NULL_OID, the last object is
8260  * is the last heap object.
8261  * c: The last object in the scanrange is the given object.
8262  * The first object in the scanrange is either the last object in
8263  * the scanrange or the one before the first object which is not
8264  * a relocated or multipage object.
8265  */
SCAN_CODE
heap_scanrange_to_prior (THREAD_ENTRY * thread_p, HEAP_SCANRANGE * scan_range, OID * last_oid)
{
  SCAN_CODE scan;
  RECDES recdes;		/* peeked record; its data is never copied out of the page */
  INT16 slotid;

  if (HEAP_DEBUG_ISVALID_SCANRANGE (scan_range) != DISK_VALID)
    {
      return S_ERROR;
    }

  if (last_oid != NULL)
    {
      if (OID_ISNULL (last_oid))
	{
	  /* Scanrange ends at last heap object */
	  scan =
	    heap_last (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid,
		       &scan_range->last_oid, &recdes, &scan_range->scan_cache, PEEK);
	  if (scan != S_SUCCESS)
	    {
	      return scan;
	    }
	}
      else
	{
	  /* Scanrange ends with the given object */
	  scan_range->last_oid = *last_oid;
	  scan =
	    heap_get_visible_version (thread_p, &scan_range->last_oid, &scan_range->scan_cache.node.class_oid, &recdes,
				      &scan_range->scan_cache, PEEK, NULL_CHN);
	  if (scan != S_SUCCESS)
	    {
	      if (scan == S_DOESNT_EXIST || scan == S_SNAPSHOT_NOT_SATISFIED)
		{
		  /* NOTE(review): this fallback steps backwards from first_oid, not
		   * from the last_oid just assigned -- asymmetric with
		   * heap_scanrange_to_following; confirm &scan_range->first_oid is
		   * intentional here. */
		  scan =
		    heap_prev (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid,
			       &scan_range->first_oid, &recdes, &scan_range->scan_cache, PEEK);
		  if (scan != S_SUCCESS)
		    {
		      return scan;
		    }
		}
	      /* NOTE(review): unlike heap_scanrange_to_following, other error codes
	       * fall through here (no else-return) and the failed scan code is only
	       * returned at the bottom -- confirm this difference is intended. */
	    }
	}
    }
  else
    {
      /*
       * Scanrange ends with the prior object after the first object in the
       * the previous scanrange
       */
      scan_range->last_oid = scan_range->first_oid;
      scan =
	heap_prev (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid,
		   &scan_range->last_oid, &recdes, &scan_range->scan_cache, PEEK);
      if (scan != S_SUCCESS)
	{
	  return scan;
	}
    }

  /*
   * Now define the first object for the scanrange. A scanrange range starts
   * when a relocated or multipage object is found or when the last object is
   * the page is found.
   */

  scan_range->first_oid = scan_range->last_oid;
  /* Extend the range backwards over consecutive REC_HOME slots of the page
   * still fixed in the scan cache; stop at the header/chain slot or at the
   * first non-REC_HOME (relocated/overflow) record. */
  if (scan_range->scan_cache.page_watcher.pgptr != NULL)
    {
      slotid = scan_range->first_oid.slotid;
      while (true)
	{
	  if (spage_previous_record (scan_range->scan_cache.page_watcher.pgptr, &slotid, &recdes, PEEK) != S_SUCCESS
	      || slotid == HEAP_HEADER_AND_CHAIN_SLOTID
	      || spage_get_record_type (scan_range->scan_cache.page_watcher.pgptr, slotid) != REC_HOME)
	    {
	      break;
	    }
	  else
	    {
	      scan_range->first_oid.slotid = slotid;
	    }
	}
    }

  return scan;
}
8356 
8357 /*
8358  * heap_scanrange_next () - Retrieve or peek next object in the scanrange
8359  * return: SCAN_CODE
8360  * (Either of S_SUCCESS, S_DOESNT_FIT, S_END,
8361  * S_ERROR)
8362  * next_oid(in/out): Object identifier of current record.
8363  * Will be set to next available record or NULL_OID when
8364  * there is not one.
8365  * recdes(in/out): Pointer to a record descriptor. Will be modified to
8366  * describe the new record.
8367  * scan_range(in/out): Scan range ... Cannot be NULL
8368  * ispeeking(in): PEEK when the object is peeked,
8369  * COPY when the object is copied
8370  *
8371  */
8372 SCAN_CODE
8373 heap_scanrange_next (THREAD_ENTRY * thread_p, OID * next_oid, RECDES * recdes, HEAP_SCANRANGE * scan_range,
8374  int ispeeking)
8375 {
8376  SCAN_CODE scan;
8377 
8378  if (HEAP_DEBUG_ISVALID_SCANRANGE (scan_range) != DISK_VALID)
8379  {
8380  return S_ERROR;
8381  }
8382 
8383  /*
8384  * If next_oid is less than the first OID in the scanrange.. get the first
8385  * object
8386  */
8387 
8388  if (OID_ISNULL (next_oid) || OID_LT (next_oid, &scan_range->first_oid))
8389  {
8390  /* Retrieve the first object in the scanrange */
8391  *next_oid = scan_range->first_oid;
8392  scan =
8393  heap_get_visible_version (thread_p, next_oid, &scan_range->scan_cache.node.class_oid, recdes,
8394  &scan_range->scan_cache, ispeeking, NULL_CHN);
8395  if (scan == S_DOESNT_EXIST || scan == S_SNAPSHOT_NOT_SATISFIED)
8396  {
8397  scan =
8398  heap_next (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid, next_oid,
8399  recdes, &scan_range->scan_cache, ispeeking);
8400  }
8401  /* Make sure that we did not go overboard */
8402  if (scan == S_SUCCESS && OID_GT (next_oid, &scan_range->last_oid))
8403  {
8404  OID_SET_NULL (next_oid);
8405  scan = S_END;
8406  }
8407  }
8408  else
8409  {
8410  /* Make sure that this is not the last OID in the scanrange */
8411  if (OID_EQ (next_oid, &scan_range->last_oid))
8412  {
8413  OID_SET_NULL (next_oid);
8414  scan = S_END;
8415  }
8416  else
8417  {
8418  scan =
8419  heap_next (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid, next_oid,
8420  recdes, &scan_range->scan_cache, ispeeking);
8421  /* Make sure that we did not go overboard */
8422  if (scan == S_SUCCESS && OID_GT (next_oid, &scan_range->last_oid))
8423  {
8424  OID_SET_NULL (next_oid);
8425  scan = S_END;
8426  }
8427  }
8428  }
8429 
8430  return scan;
8431 }
8432 
8433 #if defined (ENABLE_UNUSED_FUNCTION)
8434 /*
8435  * heap_scanrange_prev () - RETRIEVE OR PEEK NEXT OBJECT IN THE SCANRANGE
8436  * return:
8437  * returns/side-effects: SCAN_CODE
8438  * (Either of S_SUCCESS, S_DOESNT_FIT, S_END,
8439  * S_ERROR)
8440  * prev_oid(in/out): Object identifier of current record.
8441  * Will be set to previous available record or NULL_OID when
8442  * there is not one.
8443  * recdes(in/out): Pointer to a record descriptor. Will be modified to
8444  * describe the new record.
8445  * scan_range(in/out): Scan range ... Cannot be NULL
8446  * ispeeking(in): PEEK when the object is peeked,
8447  * COPY when the object is copied
8448  *
8449  */
SCAN_CODE
heap_scanrange_prev (THREAD_ENTRY * thread_p, OID * prev_oid, RECDES * recdes, HEAP_SCANRANGE * scan_range,
		     int ispeeking)
{
  SCAN_CODE scan;

  if (HEAP_DEBUG_ISVALID_SCANRANGE (scan_range) != DISK_VALID)
    {
      return S_ERROR;
    }

  if (OID_ISNULL (prev_oid) || OID_GT (prev_oid, &scan_range->last_oid))
    {
      /* Retrieve the last object in the scanrange */
      *prev_oid = scan_range->last_oid;
      scan = heap_get (thread_p, prev_oid, recdes, &scan_range->scan_cache, ispeeking, NULL_CHN);
      if (scan == S_DOESNT_EXIST || scan == S_SNAPSHOT_NOT_SATISFIED)
	{
	  /* Last object is deleted or invisible; step back to a visible one. */
	  scan =
	    heap_prev (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid, prev_oid,
		       recdes, &scan_range->scan_cache, ispeeking);
	}
      /* Make sure that we did not go underboard */
      /* NOTE(review): both "underboard" guards in this function compare against
       * last_oid; since the scan moves backwards, first_oid looks like the intended
       * lower bound -- confirm (this code is only compiled under
       * ENABLE_UNUSED_FUNCTION). */
      if (scan == S_SUCCESS && OID_LT (prev_oid, &scan_range->last_oid))
	{
	  OID_SET_NULL (prev_oid);
	  scan = S_END;
	}
    }
  else
    {
      /* Make sure that this is not the first OID in the scanrange */
      if (OID_EQ (prev_oid, &scan_range->first_oid))
	{
	  OID_SET_NULL (prev_oid);
	  scan = S_END;
	}
      else
	{
	  scan =
	    heap_prev (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid, prev_oid,
		       recdes, &scan_range->scan_cache, ispeeking);
	  if (scan == S_SUCCESS && OID_LT (prev_oid, &scan_range->last_oid))
	    {
	      OID_SET_NULL (prev_oid);
	      scan = S_END;
	    }
	}
    }

  return scan;
}
8502 
8503 /*
8504  * heap_scanrange_first () - Retrieve or peek first object in the scanrange
8505  * return: SCAN_CODE
8506  * (Either of S_SUCCESS, S_DOESNT_FIT, S_END,
8507  * S_ERROR)
8508  * first_oid(in/out): Object identifier.
8509  * Set to first available record or NULL_OID when there
8510  * is not one.
8511  * recdes(in/out): Pointer to a record descriptor. Will be modified to
8512  * describe the new record.
8513  * scan_range(in/out): Scan range ... Cannot be NULL
8514  * ispeeking(in): PEEK when the object is peeked,
8515  * COPY when the object is copied
8516  *
8517  */
8518 SCAN_CODE
8519 heap_scanrange_first (THREAD_ENTRY * thread_p, OID * first_oid, RECDES * recdes, HEAP_SCANRANGE * scan_range,
8520  int ispeeking)
8521 {
8522  SCAN_CODE scan;
8523 
8524  if (HEAP_DEBUG_ISVALID_SCANRANGE (scan_range) != DISK_VALID)
8525  {
8526  return S_ERROR;
8527  }
8528 
8529  /* Retrieve the first object in the scanrange */
8530  *first_oid = scan_range->first_oid;
8531  scan = heap_get (thread_p, first_oid, recdes, &scan_range->scan_cache, ispeeking, NULL_CHN);
8532  if (scan == S_DOESNT_EXIST || scan == S_SNAPSHOT_NOT_SATISFIED)
8533  {
8534  scan =
8535  heap_next (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid, first_oid,
8536  recdes, &scan_range->scan_cache, ispeeking);
8537  }
8538  /* Make sure that we did not go overboard */
8539  if (scan == S_SUCCESS && OID_GT (first_oid, &scan_range->last_oid))
8540  {
8541  OID_SET_NULL (first_oid);
8542  scan = S_END;
8543  }
8544 
8545  return scan;
8546 }
8547 
8548 /*
8549  * heap_scanrange_last () - Retrieve or peek last object in the scanrange
8550  * return: SCAN_CODE
8551  * (Either of S_SUCCESS, S_DOESNT_FIT, S_END,
8552  * S_ERROR)
8553  * last_oid(in/out): Object identifier.
8554  * Set to last available record or NULL_OID when there is
8555  * not one
8556  * recdes(in/out): Pointer to a record descriptor. Will be modified to
8557  * describe the new record.
8558  * scan_range(in/out): Scan range ... Cannot be NULL
8559  * ispeeking(in): PEEK when the object is peeked,
8560  * COPY when the object is copied
8561  *
8562  */
8563 SCAN_CODE
8564 heap_scanrange_last (THREAD_ENTRY * thread_p, OID * last_oid, RECDES * recdes, HEAP_SCANRANGE * scan_range,
8565  int ispeeking)
8566 {
8567  SCAN_CODE scan;
8568 
8569  if (HEAP_DEBUG_ISVALID_SCANRANGE (scan_range) != DISK_VALID)
8570  {
8571  return S_ERROR;
8572  }
8573 
8574  /* Retrieve the last object in the scanrange */
8575  *last_oid = scan_range->last_oid;
8576  scan = heap_get (thread_p, last_oid, recdes, &scan_range->scan_cache, ispeeking, NULL_CHN);
8577  if (scan == S_DOESNT_EXIST || scan == S_SNAPSHOT_NOT_SATISFIED)
8578  {
8579  scan =
8580  heap_prev (thread_p, &scan_range->scan_cache.node.hfid, &scan_range->scan_cache.node.class_oid, last_oid,
8581  recdes, &scan_range->scan_cache, ispeeking);
8582  }
8583  /* Make sure that we did not go underboard */
8584  if (scan == S_SUCCESS && OID_LT (last_oid, &scan_range->last_oid))
8585  {
8586  OID_SET_NULL (last_oid);
8587  scan = S_END;
8588  }
8589 
8590  return scan;
8591 }
8592 #endif
8593 
8594 /*
8595  * heap_does_exist () - Does object exist?
8596  * return: true/false
8597  * class_oid(in): Class identifier of object or NULL
8598  * oid(in): Object identifier
8599  *
8600  * Note: Check if the object associated with the given OID exist.
8601  * If the class of the object does not exist, the object does not
8602  * exist either. If the class is not given or a NULL_OID is
8603  * passed, the function finds the class oid.
8604  */
8605 bool
8606 heap_does_exist (THREAD_ENTRY * thread_p, OID * class_oid, const OID * oid)
8607 {
8608  VPID vpid;
8609  OID tmp_oid;
8610  PGBUF_WATCHER pg_watcher;
8611  bool doesexist = true;
8612  INT16 rectype;
8613  bool old_check_interrupt;
8614  int old_wait_msec;
8615 
8617 
8618  old_check_interrupt = logtb_set_check_interrupt (thread_p, false);
8619  old_wait_msec = xlogtb_reset_wait_msecs (thread_p, LK_INFINITE_WAIT);
8620 
8621  if (HEAP_ISVALID_OID (thread_p, oid) != DISK_VALID)
8622  {
8623  doesexist = false;
8624  goto exit_on_end;
8625  }
8626 
8627  /*
8628  * If the class is not NULL and it is different from the Rootclass,
8629  * make sure that it exist. Rootclass always exist.. not need to check
8630  * for it
8631  */
8632  if (class_oid != NULL && !OID_EQ (class_oid, oid_Root_class_oid)
8633  && HEAP_ISVALID_OID (thread_p, class_oid) != DISK_VALID)
8634  {
8635  doesexist = false;
8636  goto exit_on_end;
8637  }
8638 
8639  while (doesexist)
8640  {
8641  if (oid->slotid == HEAP_HEADER_AND_CHAIN_SLOTID || oid->slotid < 0 || oid->pageid < 0 || oid->volid < 0)
8642  {
8643  doesexist = false;
8644  goto exit_on_end;
8645  }
8646 
8647  vpid.volid = oid->volid;
8648  vpid.pageid = oid->pageid;
8649 
8650  /* Fetch the page where the record is stored */
8651 
8652  pg_watcher.pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, S_LOCK, NULL, &pg_watcher);
8653  if (pg_watcher.pgptr == NULL)
8654  {
8655  if (er_errid () == ER_PB_BAD_PAGEID)
8656  {
8658  oid->slotid);
8659  }
8660 
8661  /* something went wrong, give up */
8662  doesexist = false;
8663  goto exit_on_end;
8664  }
8665 
8666  doesexist = spage_is_slot_exist (pg_watcher.pgptr, oid->slotid);
8667  rectype = spage_get_record_type (pg_watcher.pgptr, oid->slotid);
8668 
8669  /*
8670  * Check the class
8671  */
8672 
8673  if (doesexist && rectype != REC_ASSIGN_ADDRESS)
8674  {
8675  if (class_oid == NULL)
8676  {
8677  class_oid = &tmp_oid;
8678  OID_SET_NULL (class_oid);
8679  }
8680 
8681  if (OID_ISNULL (class_oid))
8682  {
8683  /*
8684  * Caller does not know the class of the object. Get the class
8685  * identifier from disk
8686  */
8687  if (heap_get_class_oid_from_page (thread_p, pg_watcher.pgptr, class_oid) != NO_ERROR)
8688  {
8689  assert_release (false);
8690  doesexist = false;
8691  goto exit_on_end;
8692  }
8693  assert (!OID_ISNULL (class_oid));
8694  }
8695 
8696  pgbuf_ordered_unfix (thread_p, &pg_watcher);
8697 
8698  /* If doesexist is true, then check its class */
8699  if (!OID_IS_ROOTOID (class_oid))
8700  {
8701  /*
8702  * Make sure that the class exist too. Loop with this
8703  */
8704  oid = class_oid;
8705  class_oid = oid_Root_class_oid;
8706  }
8707  else
8708  {
8709  break;
8710  }
8711  }
8712  else
8713  {
8714  break;
8715  }
8716  }
8717 
8718 exit_on_end:
8719 
8720  if (pg_watcher.pgptr != NULL)
8721  {
8722  pgbuf_ordered_unfix (thread_p, &pg_watcher);
8723  }
8724 
8725  (void) logtb_set_check_interrupt (thread_p, old_check_interrupt);
8726  (void) xlogtb_reset_wait_msecs (thread_p, old_wait_msec);
8727 
8728  return doesexist;
8729 }
8730 
8731 /*
8732  * heap_is_object_not_null () - Check if object should be considered not NULL.
8733  *
8734  * return : True if object is visible or too new, false if it is deleted or if errors occur.
8735  * thread_p (in) : Thread entry.
8736  * class_oid (in) : Class OID.
8737  * oid (in) : Instance OID.
8738  */
bool
heap_is_object_not_null (THREAD_ENTRY * thread_p, OID * class_oid, const OID * oid)
{
  /* Interrupt checking is disabled for the whole probe and restored on exit. */
  bool old_check_interrupt = logtb_set_check_interrupt (thread_p, false);
  bool doesexist = false;
  HEAP_SCANCACHE scan_cache;
  SCAN_CODE scan = S_SUCCESS;
  OID local_class_oid = OID_INITIALIZER;	/* used when caller passes no class OID */
  MVCC_SNAPSHOT *mvcc_snapshot_ptr;
  MVCC_SNAPSHOT copy_mvcc_snapshot;
  bool is_scancache_started = false;

  /* Any errors raised while probing are discarded by er_stack_pop () below. */
  er_stack_push ();

  if (HEAP_ISVALID_OID (thread_p, oid) != DISK_VALID)
    {
      goto exit_on_end;
    }

  /*
   * If the class is not NULL and it is different from the Root class,
   * make sure that it exist. Root class always exist.. not need to check for it
   */
  if (class_oid != NULL && !OID_EQ (class_oid, oid_Root_class_oid)
      && HEAP_ISVALID_OID (thread_p, class_oid) != DISK_VALID)
    {
      goto exit_on_end;
    }
  if (class_oid == NULL)
    {
      /* heap_get_visible_version will fill in the class OID for us. */
      class_oid = &local_class_oid;
    }

  if (heap_scancache_quick_start (&scan_cache) != NO_ERROR)
    {
      goto exit_on_end;
    }
  is_scancache_started = true;

  mvcc_snapshot_ptr = logtb_get_mvcc_snapshot (thread_p);
  if (mvcc_snapshot_ptr == NULL)
    {
      assert (false);
      goto exit_on_end;
    }
  /* Make a copy of snapshot. We need all MVCC information, but we also want to change the visibility function. */
  copy_mvcc_snapshot = *mvcc_snapshot_ptr;
  copy_mvcc_snapshot.snapshot_fnc = mvcc_is_not_deleted_for_snapshot;
  scan_cache.mvcc_snapshot = &copy_mvcc_snapshot;

  /* Check only if the last version of the object is not deleted, see mvcc_is_not_deleted_for_snapshot return values */
  scan = heap_get_visible_version (thread_p, oid, class_oid, NULL, &scan_cache, PEEK, NULL_CHN);
  if (scan != S_SUCCESS)
    {
      goto exit_on_end;
    }
  assert (!OID_ISNULL (class_oid));

  /* Check class exists. */
  doesexist = heap_does_exist (thread_p, oid_Root_class_oid, class_oid);

exit_on_end:
  /* Restore the caller's interrupt-checking state. */
  (void) logtb_set_check_interrupt (thread_p, old_check_interrupt);

  if (is_scancache_started)
    {
      heap_scancache_end (thread_p, &scan_cache);
    }

  /* We don't need to propagate errors from here. */
  er_stack_pop ();

  return doesexist;
}
8813 
8814 /*
8815  * heap_get_num_objects () - Count the number of objects
8816  * return: number of records or -1 in case of an error
8817  * hfid(in): Object heap file identifier
8818  * npages(in):
8819  * nobjs(in):
8820  * avg_length(in):
8821  *
8822  * Note: Count the number of objects stored on the given heap.
8823  * This function is expensive since all pages of the heap are
8824  * fetched to find the number of objects.
8825  */
8826 int
8827 heap_get_num_objects (THREAD_ENTRY * thread_p, const HFID * hfid, int *npages, int *nobjs, int *avg_length)
8828 {
8829  VPID vpid; /* Page-volume identifier */
8830  LOG_DATA_ADDR addr_hdr; /* Address of logging data */
8831  RECDES hdr_recdes; /* Record descriptor to point to space statistics */
8832  HEAP_HDR_STATS *heap_hdr; /* Heap header */
8833  PGBUF_WATCHER hdr_pg_watcher;
8834 
8835  /*
8836  * Get the heap header in exclusive mode and call the synchronization to
8837  * update the statistics of the heap. The number of record/objects is
8838  * updated.
8839  */
8840 
8841  PGBUF_INIT_WATCHER (&hdr_pg_watcher, PGBUF_ORDERED_HEAP_HDR, hfid);
8842 
8843  vpid.volid = hfid->vfid.volid;
8844  vpid.pageid = hfid->hpgid;
8845 
8846  addr_hdr.vfid = &hfid->vfid;
8848 
8849  if (pgbuf_ordered_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &hdr_pg_watcher) != NO_ERROR)
8850  {
8851  return ER_FAILED;
8852  }
8853 
8854  (void) pgbuf_check_page_ptype (thread_p, hdr_pg_watcher.pgptr, PAGE_HEAP);
8855 
8856  if (spage_get_record (thread_p, hdr_pg_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, PEEK) != S_SUCCESS)
8857  {
8858  pgbuf_ordered_unfix (thread_p, &hdr_pg_watcher);
8859  return ER_FAILED;
8860  }
8861 
8862  heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data;
8863  if (heap_stats_sync_bestspace (thread_p, hfid, heap_hdr, pgbuf_get_vpid_ptr (hdr_pg_watcher.pgptr), true, true) < 0)
8864  {
8865  pgbuf_ordered_unfix (thread_p, &hdr_pg_watcher);
8866  return ER_FAILED;
8867  }
8868  *npages = heap_hdr->estimates.num_pages;
8869  *nobjs = heap_hdr->estimates.num_recs;
8870  if (*nobjs > 0)
8871  {
8872  *avg_length = (int) ((heap_hdr->estimates.recs_sumlen / (float) *nobjs) + 0.9);
8873  }
8874  else
8875  {
8876  *avg_length = 0;
8877  }
8878 
8879  addr_hdr.pgptr = hdr_pg_watcher.pgptr;
8880  log_skip_logging (thread_p, &addr_hdr);
8881  pgbuf_ordered_set_dirty_and_free (thread_p, &hdr_pg_watcher);
8882 
8883  return *nobjs;
8884 }
8885 
8886 /*
8887  * heap_estimate () - Estimate the number of pages, objects, average length
8888  * return: number of pages estimated or -1 in case of an error
8889  * hfid(in): Object heap file identifier
8890  * npages(in):
8891  * nobjs(in):
8892  * avg_length(in):
8893  *
8894  * Note: Estimate the number of pages, objects, and average length of objects.
8895  */
int
heap_estimate (THREAD_ENTRY * thread_p, const HFID * hfid, int *npages, int *nobjs, int *avg_length)
{
  VPID vpid;			/* Page-volume identifier */
  PAGE_PTR hdr_pgptr = NULL;	/* Page pointer */
  RECDES hdr_recdes;		/* Record descriptor to point to space statistics */
  HEAP_HDR_STATS *heap_hdr;	/* Heap header */

  /*
   * Get the heap header in shared mode since it is an estimation of the
   * number of objects.
   */

  vpid.volid = hfid->vfid.volid;
  vpid.pageid = hfid->hpgid;

  /* NOTE(review): the page-fix call that fetches the heap header page into
   * hdr_pgptr appears to be elided in this excerpt -- confirm against the
   * full source before relying on this path. */
  if (hdr_pgptr == NULL)
    {
      /* something went wrong. Unable to fetch header page */
      return ER_FAILED;
    }

  (void) pgbuf_check_page_ptype (thread_p, hdr_pgptr, PAGE_HEAP);

  /* Peek at the header record, which stores the cached space statistics. */
  if (spage_get_record (thread_p, hdr_pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, PEEK) != S_SUCCESS)
    {
      pgbuf_unfix_and_init (thread_p, hdr_pgptr);
      return ER_FAILED;
    }

  /* Copy the estimates out of the header; these are approximations, not
   * exact counts. */
  heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data;
  *npages = heap_hdr->estimates.num_pages;
  *nobjs = heap_hdr->estimates.num_recs;
  if (*nobjs > 0)
    {
      /* the +0.9 rounds the average length up to the next integer */
      *avg_length = (int) ((heap_hdr->estimates.recs_sumlen / (float) *nobjs) + 0.9);
    }
  else
    {
      *avg_length = 0;
    }

  pgbuf_unfix_and_init (thread_p, hdr_pgptr);

  return *npages;
}
8943 
8944 /*
8945  * heap_estimate_num_objects () - Estimate the number of objects
8946  * return: number of records estimated or -1 in case of an error
8947  * hfid(in): Object heap file identifier
8948  *
8949  * Note: Estimate the number of objects stored on the given heap.
8950  */
8951 int
8953 {
8954  int ignore_npages = -1;
8955  int ignore_avg_reclen = -1;
8956  int nobjs = -1;
8957 
8958  if (heap_estimate (thread_p, hfid, &ignore_npages, &nobjs, &ignore_avg_reclen) == -1)
8959  {
8960  return ER_FAILED;
8961  }
8962 
8963  return nobjs;
8964 }
8965 
8966 /*
8967  * heap_estimate_avg_length () - Estimate the average length of records
8968  * return: avg length
8969  * hfid(in): Object heap file identifier
8970  *
 * Note: Estimate the average length of the objects stored on the heap.
8972  * This function is mainly used when we are creating the OID of
8973  * an object of which we do not know its length. Mainly for
8974  * loaddb during forward references to other objects.
8975  */
8976 static int
8977 heap_estimate_avg_length (THREAD_ENTRY * thread_p, const HFID * hfid)
8978 {
8979  int ignore_npages;
8980  int ignore_nobjs;
8981  int avg_reclen;
8982 
8983  if (heap_estimate (thread_p, hfid, &ignore_npages, &ignore_nobjs, &avg_reclen) == -1)
8984  {
8985  return ER_FAILED;
8986  }
8987 
8988  return avg_reclen;
8989 }
8990 
8991 /*
8992  * heap_get_capacity () - Find space consumed by heap
8993  * return: NO_ERROR
8994  * hfid(in): Object heap file identifier
8995  * num_recs(in/out): Total Number of objects
8996  * num_recs_relocated(in/out):
8997  * num_recs_inovf(in/out):
8998  * num_pages(in/out): Total number of heap pages
8999  * avg_freespace(in/out): Average free space per page
9000  * avg_freespace_nolast(in/out): Average free space per page without taking in
9001  * consideration last page
9002  * avg_reclength(in/out): Average object length
9003  * avg_overhead(in/out): Average overhead per page
9004  *
9005  * Note: Find the current storage facts/capacity for given heap.
9006  */
static int
heap_get_capacity (THREAD_ENTRY * thread_p, const HFID * hfid, INT64 * num_recs, INT64 * num_recs_relocated,
		   INT64 * num_recs_inovf, INT64 * num_pages, int *avg_freespace, int *avg_freespace_nolast,
		   int *avg_reclength, int *avg_overhead)
{
  VPID vpid;			/* Page-volume identifier */
  RECDES recdes;		/* Header record descriptor */
  INT16 slotid;			/* Slot of one object */
  OID *ovf_oid;			/* OID of overflow portion of a big record */
  int last_freespace;		/* Free space of the most recently visited page */
  int ovf_len;
  int ovf_num_pages;
  int ovf_free_space;
  int ovf_overhead;
  int j;			/* Remaining slots to scan on current page */
  INT16 type = REC_UNKNOWN;
  int ret = NO_ERROR;
  INT64 sum_freespace = 0;
  INT64 sum_reclength = 0;
  INT64 sum_overhead = 0;
  PGBUF_WATCHER pg_watcher;	/* Currently fixed heap page */
  PGBUF_WATCHER old_pg_watcher;	/* Previous page, kept fixed while chaining */

  /* NOTE(review): the PGBUF_INIT_WATCHER calls for the two watchers appear
   * to be elided in this excerpt -- confirm against the full source. */

  /* Reset all output statistics before the scan. */
  *num_recs = 0;
  *num_pages = 0;
  *avg_freespace = 0;
  *avg_reclength = 0;
  *avg_overhead = 0;
  *num_recs_relocated = 0;
  *num_recs_inovf = 0;
  last_freespace = 0;

  vpid.volid = hfid->vfid.volid;
  vpid.pageid = hfid->hpgid;

  /* Walk the chain of heap pages starting at the header page. */
  while (!VPID_ISNULL (&vpid))
    {
      /* NOTE(review): the pgbuf_ordered_fix call that fixes the next page is
       * elided in this excerpt. */
      pg_watcher.pgptr =
      if (old_pg_watcher.pgptr != NULL)
	{
	  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
	}

      if (pg_watcher.pgptr == NULL)
	{
	  /* something went wrong, return error */
	  goto exit_on_error;
	}

      slotid = -1;
      j = spage_number_of_records (pg_watcher.pgptr);

      last_freespace = spage_get_free_space (thread_p, pg_watcher.pgptr);

      *num_pages += 1;
      sum_freespace += last_freespace;
      /* Every slot entry costs slot-size bytes of overhead. */
      sum_overhead += j * spage_slot_size ();

      /* Scan each slot of the page and account for it by record type. */
      while ((j--) > 0)
	{
	  if (spage_next_record (pg_watcher.pgptr, &slotid, &recdes, PEEK) == S_SUCCESS)
	    {
	      if (slotid != HEAP_HEADER_AND_CHAIN_SLOTID)
		{
		  type = spage_get_record_type (pg_watcher.pgptr, slotid);
		  switch (type)
		    {
		    case REC_RELOCATION:
		      *num_recs_relocated += 1;
		      sum_overhead += spage_get_record_length (thread_p, pg_watcher.pgptr, slotid);
		      break;
		    case REC_ASSIGN_ADDRESS:
		    case REC_HOME:
		    case REC_NEWHOME:
		      /*
		       * Note: for newhome (relocated), we are including the length
		       * and number of records. In the relocation record (above)
		       * we are just adding the overhead and number of
		       * relocation records.
		       * for assign address, we assume the given size.
		       */
		      *num_recs += 1;
		      sum_reclength += spage_get_record_length (thread_p, pg_watcher.pgptr, slotid);
		      break;
		    case REC_BIGONE:
		      /* Big record: home slot holds only the overflow OID;
		       * real data lives in an overflow file. */
		      *num_recs += 1;
		      *num_recs_inovf += 1;
		      sum_overhead += spage_get_record_length (thread_p, pg_watcher.pgptr, slotid);

		      ovf_oid = (OID *) recdes.data;
		      if (heap_ovf_get_capacity (thread_p, ovf_oid, &ovf_len, &ovf_num_pages, &ovf_overhead,
						 &ovf_free_space) == NO_ERROR)
			{
			  sum_reclength += ovf_len;
			  *num_pages += ovf_num_pages;
			  sum_freespace += ovf_free_space;
			  sum_overhead += ovf_overhead;
			}
		      break;
		    case REC_MARKDELETED:
		      /*
		       * TODO Find out and document here why this is added to
		       * the overhead. The record has been deleted so its
		       * length should no longer have any meaning. Perhaps
		       * the length of the slot should have been added instead?
		       */
		      sum_overhead += spage_get_record_length (thread_p, pg_watcher.pgptr, slotid);
		      break;
		    /* NOTE(review): one case label (likely
		     * REC_DELETED_WILL_REUSE) is elided in this excerpt. */
		    default:
		      break;
		    }
		}
	    }
	}
      /* Fetch the VPID of the next page in the chain, then hand the current
       * page over to old_pg_watcher so it stays fixed across the hop. */
      (void) heap_vpid_next (thread_p, hfid, pg_watcher.pgptr, &vpid);
      pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
    }

  if (old_pg_watcher.pgptr != NULL)
    {
      pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
    }

  assert (pg_watcher.pgptr == NULL);

  if (*num_pages > 0)
    {
      /*
       * Don't take in consideration the last page for free space
       * considerations since the average free space will be contaminated.
       */
      *avg_freespace_nolast = ((*num_pages > 1) ? (int) ((sum_freespace - last_freespace) / (*num_pages - 1)) : 0);
      *avg_freespace = (int) (sum_freespace / *num_pages);
      *avg_overhead = (int) (sum_overhead / *num_pages);
    }

  if (*num_recs != 0)
    {
      *avg_reclength = (int) (sum_reclength / *num_recs);
    }

  return ret;

exit_on_error:

  if (old_pg_watcher.pgptr != NULL)
    {
      pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
    }
  assert (pg_watcher.pgptr == NULL);

  /* Prefer the error already recorded via er_set; fall back to ER_FAILED. */
  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
}
9165 
9166 /*
9167 * heap_get_class_oid () - Get class for object. This function doesn't follow
9168 * MVCC versions. Caller must know to use right
9169 * version for this.
9170 *
9171 * return : Scan code.
9172 * thread_p (in) : Thread entry.
9173 * oid (in) : Object OID.
9174 * class_oid (out) : Output class OID.
9175 */
SCAN_CODE
heap_get_class_oid (THREAD_ENTRY * thread_p, const OID * oid, OID * class_oid)
{
  PGBUF_WATCHER page_watcher;	/* ordered watcher for the object's home page */
  int err;

  /* NOTE(review): the PGBUF_INIT_WATCHER call for page_watcher appears to be
   * elided in this excerpt -- confirm against the full source. */

  assert (oid != NULL && !OID_ISNULL (oid) && class_oid != NULL);
  OID_SET_NULL (class_oid);

  /* Fix the page holding the object; a read latch suffices since only the
   * page's chain information is inspected. */
  err = heap_prepare_object_page (thread_p, oid, &page_watcher, PGBUF_LATCH_READ);
  if (err != NO_ERROR)
    {
      /* for non existent object, return S_DOESNT_EXIST and let the caller handle the case; */
      /* NOTE(review): the return statement of this error branch is elided in
       * this excerpt. */
    }

  /* Get class OID from HEAP_CHAIN. */
  if (heap_get_class_oid_from_page (thread_p, page_watcher.pgptr, class_oid) != NO_ERROR)
    {
      /* Unexpected. */
      assert_release (false);
      pgbuf_ordered_unfix (thread_p, &page_watcher);
      return S_ERROR;
    }

  pgbuf_ordered_unfix (thread_p, &page_watcher);
  return S_SUCCESS;
}
9206 
9207 /*
9208  * heap_get_class_name () - Find classname when oid is a class
9209  * return: error_code
9210  *
9211  * class_oid(in): The Class Object identifier
9212  * class_name(out): Reference of the Class name pointer where name will reside;
9213  * The classname space must be released by the caller.
9214  *
9215  * Note: Find the name of the given class identifier. It asserts that the given OID is class OID.
9216  *
9217  * Note: Classname pointer must be released by the caller using free_and_init
9218  */
9219 int
9220 heap_get_class_name (THREAD_ENTRY * thread_p, const OID * class_oid, char **class_name)
9221 {
9222  return heap_get_class_name_alloc_if_diff (thread_p, class_oid, NULL, class_name);
9223 }
9224 
9225 /*
9226  * heap_get_class_name_alloc_if_diff () - Get the name of given class
9227  * name is malloc when different than given name
9228  * return: error_code if error(other than ER_HEAP_NODATA_NEWADDRESS) occur
9229  *
9230  * class_oid(in): The Class Object identifier
9231  * guess_classname(in): Guess name of class
9232  * classname_out(out): guess_classname when it is the real name. Don't need to free.
9233  * malloc classname when different from guess_classname.
9234  * Must be free by caller (free_and_init)
9235  * NULL in case of error
9236  *
9237  * Note: Find the name of the given class identifier. If the name is
9238  * the same as the guessed name, the guessed name is returned.
9239  * Otherwise, an allocated area with the name of the class is
9240  * returned.
9241  */
int
heap_get_class_name_alloc_if_diff (THREAD_ENTRY * thread_p, const OID * class_oid, char *guess_classname,
				   char **classname_out)
{
  char *classname = NULL;	/* name as stored in the class record (PEEKed) */
  RECDES recdes;
  HEAP_SCANCACHE scan_cache;
  int error_code = NO_ERROR;

  /* A quick scan cache on the root heap is enough to peek the class record. */
  (void) heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);

  if (heap_get_class_record (thread_p, class_oid, &recdes, &scan_cache, PEEK) == S_SUCCESS)
    {
      classname = or_class_name (&recdes);
      if (guess_classname == NULL || strcmp (guess_classname, classname) != 0)
	{
	  /*
	   * The names are different.. return a copy that must be freed.
	   */
	  *classname_out = strdup (classname);
	  if (*classname_out == NULL)
	    {
	      /* NOTE(review): the first line of the er_set call reporting
	       * out-of-memory is elided in this excerpt. */
	      (strlen (classname) + 1) * sizeof (char));
	      error_code = ER_FAILED;
	    }
	}
      else
	{
	  /*
	   * The classnames are identical
	   */
	  *classname_out = guess_classname;
	}
    }
  else
    {
      /* Class record could not be read; keep the error unless it is only the
       * transient new-address condition, which is cleared and ignored. */
      ASSERT_ERROR_AND_SET (error_code);
      *classname_out = NULL;
      if (error_code == ER_HEAP_NODATA_NEWADDRESS)
	{
	  /* clear ER_HEAP_NODATA_NEWADDRESS */
	  er_clear ();
	  error_code = NO_ERROR;
	}
    }

  heap_scancache_end (thread_p, &scan_cache);

  return error_code;
}
9293 
9294 /*
9295  * heap_attrinfo_start () - Initialize an attribute information structure
9296  * return: NO_ERROR
9297  * class_oid(in): The class identifier of the instances where values
9298  * attributes values are going to be read.
9299  * requested_num_attrs(in): Number of requested attributes
9300  * If <=0 are given, it means interested on ALL.
9301  * attrids(in): Array of requested attributes
9302  * attr_info(in/out): The attribute information structure
9303  *
9304  * Note: Initialize an attribute information structure, so that values
9305  * of instances can be retrieved based on the desired attributes.
9306  * If the requested number of attributes is less than zero,
9307  * all attributes will be assumed instead. In this case
9308  * the attrids array should be NULL.
9309  *
 * The attrinfo structure is a structure where values of
 * instances can be read. For example an object is retrieved,
 * then some of its attributes are converted to dbvalues and
9313  * placed in this structure.
9314  *
9315  * Note: The caller must call heap_attrinfo_end after he is done with
9316  * attribute information.
9317  */
int
heap_attrinfo_start (THREAD_ENTRY * thread_p, const OID * class_oid, int requested_num_attrs, const ATTR_ID * attrids,
		     HEAP_CACHE_ATTRINFO * attr_info)
{
  HEAP_ATTRVALUE *value;	/* Disk value Attr info for a particular attr */
  bool getall;			/* Want all attribute values */
  int i;
  int ret = NO_ERROR;

  if (requested_num_attrs == 0)
    {
      /* initialize the attrinfo cache and return, there is nothing else to do */
      (void) memset (attr_info, '\0', sizeof (HEAP_CACHE_ATTRINFO));

      /* now set the num_values to -1 which indicates that this is an empty HEAP_CACHE_ATTRINFO and shouldn't be
       * operated on. */
      attr_info->num_values = -1;
      return NO_ERROR;
    }

  /* A negative request means "all attributes"; attrids should be NULL. */
  if (requested_num_attrs < 0)
    {
      getall = true;
    }
  else
    {
      getall = false;
    }

  /*
   * initialize attribute information
   *
   */

  attr_info->class_oid = *class_oid;
  attr_info->last_cacheindex = -1;
  attr_info->read_cacheindex = -1;

  attr_info->last_classrepr = NULL;
  attr_info->read_classrepr = NULL;

  OID_SET_NULL (&attr_info->inst_oid);
  attr_info->inst_chn = NULL_CHN;
  attr_info->values = NULL;
  /* stays -1 until fully initialized so heap_attrinfo_end () on the error
   * path treats the structure as empty */
  attr_info->num_values = -1;	/* initialize attr_info */

  /*
   * Find the most recent representation of the instances of the class, and
   * cache the structure that describe the attributes of this representation.
   * At the same time find the default values of attributes, the shared
   * attribute values and the class attribute values.
   */

  attr_info->last_classrepr =
    heap_classrepr_get (thread_p, &attr_info->class_oid, NULL, NULL_REPRID, &attr_info->last_cacheindex);
  if (attr_info->last_classrepr == NULL)
    {
      goto exit_on_error;
    }

  /*
   * If the requested attributes is < 0, get all attributes of the last
   * representation.
   */

  if (requested_num_attrs < 0)
    {
      requested_num_attrs = attr_info->last_classrepr->n_attributes;
    }
  else if (requested_num_attrs >
	   (attr_info->last_classrepr->n_attributes + attr_info->last_classrepr->n_shared_attrs +
	    attr_info->last_classrepr->n_class_attrs))
    {
      /* Request exceeds the class total; warn and clamp to the maximum. */
      fprintf (stdout, " XXX There are not that many attributes. Num_attrs = %d, Num_requested_attrs = %d\n",
	       attr_info->last_classrepr->n_attributes, requested_num_attrs);
      requested_num_attrs =
	attr_info->last_classrepr->n_attributes + attr_info->last_classrepr->n_shared_attrs +
	attr_info->last_classrepr->n_class_attrs;
    }

  if (requested_num_attrs > 0)
    {
      attr_info->values =
	(HEAP_ATTRVALUE *) db_private_alloc (thread_p, requested_num_attrs * sizeof (*(attr_info->values)));
      if (attr_info->values == NULL)
	{
	  goto exit_on_error;
	}
    }
  else
    {
      attr_info->values = NULL;
    }

  attr_info->num_values = requested_num_attrs;

  /*
   * Set the attribute identifier of the desired attributes in the value
   * attribute information, and indicates that the current value is
   * uninitialized. That is, it has not been read, set or whatever.
   */

  for (i = 0; i < attr_info->num_values; i++)
    {
      value = &attr_info->values[i];
      if (getall == true)
	{
	  /* attrid -1 is a placeholder; real ids are filled in by
	   * heap_attrinfo_recache_attrepr () below */
	  value->attrid = -1;
	}
      else
	{
	  value->attrid = *attrids++;
	}
      value->state = HEAP_UNINIT_ATTRVALUE;
      value->do_increment = 0;
      value->last_attrepr = NULL;
      value->read_attrepr = NULL;
    }

  /*
   * Make last information to be recached for each individual attribute
   * value. Needed for WRITE and Default values
   */

  if (heap_attrinfo_recache_attrepr (attr_info, true) != NO_ERROR)
    {
      goto exit_on_error;
    }

  return ret;

exit_on_error:

  /* heap_attrinfo_end is safe here even if only partially initialized */
  heap_attrinfo_end (thread_p, attr_info);

  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
}
9455 
#if 0				/* TODO: remove unused */
/*
 * heap_moreattr_attrinfo () - Add another attribute to the attribute information
 *                           cache
 *   return: NO_ERROR
 *   attrid(in): The information of the attribute that will be needed
 *   attr_info(in/out): The attribute information structure
 *
 * Note: The given attribute is included as part of the reading or
 *       transformation process.
 */
static int
heap_moreattr_attrinfo (int attrid, HEAP_CACHE_ATTRINFO * attr_info)
{
  HEAP_ATTRVALUE *new_values;	/* The new value attribute array */
  HEAP_ATTRVALUE *value;	/* Disk value Attr info for a particular attr */
  int i;
  int ret = NO_ERROR;

  /*
   * If we get an empty HEAP_CACHE_ATTRINFO, this is an error. We can
   * not add more attributes to an improperly initialized HEAP_CACHE_ATTRINFO
   * structure.
   */
  if (attr_info->num_values == -1)
    {
      return ER_FAILED;
    }

  /*
   * Make sure that the attribute is not already included
   */
  for (i = 0; i < attr_info->num_values; i++)
    {
      value = &attr_info->values[i];
      if (value != NULL && value->attrid == attrid)
	{
	  return NO_ERROR;
	}
    }

  /*
   * Resize the value attribute array and set the attribute identifier as
   * as part of the desired attribute list
   */
  i = (attr_info->num_values + 1) * sizeof (*(attr_info->values));

  new_values = (HEAP_ATTRVALUE *) db_private_realloc (NULL, attr_info->values, i);
  if (new_values == NULL)
    {
      goto exit_on_error;
    }

  attr_info->values = new_values;

  /* Initialize the newly appended slot. */
  value = &attr_info->values[attr_info->num_values];
  value->attrid = attrid;
  value->state = HEAP_UNINIT_ATTRVALUE;
  value->last_attrepr = NULL;
  value->read_attrepr = NULL;
  attr_info->num_values++;

  /*
   * Recache attribute representation and get default value specifications
   * for new attribute. The default values are located on the last
   * representation
   */

  /* NOTE(review): part of the condition below (apparently a
   * db_value_domain_init call) is elided in this excerpt. */
  if (heap_attrinfo_recache_attrepr (attr_info, true) != NO_ERROR
		      value->read_attrepr->domain->scale) != NO_ERROR)
    {
      /* Roll back the append on failure. */
      attr_info->num_values--;
      value->attrid = -1;
      goto exit_on_error;
    }

end:

  return ret;

exit_on_error:

  assert (ret != NO_ERROR);
  if (ret == NO_ERROR)
    {
      assert (er_errid () != NO_ERROR);
      ret = er_errid ();
      if (ret == NO_ERROR)
	{
	  ret = ER_FAILED;
	}
    }
  goto end;
}
#endif
9552 
9553 /*
9554  * heap_attrinfo_recache_attrepr () - Recache attribute information for given attrinfo for
9555  * each attribute value
9556  * return: NO_ERROR
9557  * attr_info(in/out): The attribute information structure
9558  * islast_reset(in): Are we resetting information for last representation.
9559  *
9560  * Note: Recache the attribute information for given representation
9561  * identifier of the class in attr_info for each attribute value.
9562  * That is, set each attribute information to point to disk
9563  * related attribute information for given representation
9564  * identifier.
9565  * When we are resetting information for last representation,
9566  * attribute values are also initialized.
9567  */
9568 
static int
heap_attrinfo_recache_attrepr (HEAP_CACHE_ATTRINFO * attr_info, bool islast_reset)
{
  HEAP_ATTRVALUE *value;	/* Disk value Attr info for a particular attr */
  int num_found_attrs;		/* Num of found attributes */
  int srch_num_attrs;		/* Num of attributes that can be searched */
  int srch_num_shared;		/* Num of shared attrs that can be searched */
  int srch_num_class;		/* Num of class attrs that can be searched */
  OR_ATTRIBUTE *search_attrepr;	/* Information for disk attribute */
  int i, curr_attr;
  bool isattr_found;
  int ret = NO_ERROR;

  /*
   * Initialize the value domain for dbvalues of all desired attributes
   */
  if (islast_reset == true)
    {
      srch_num_attrs = attr_info->last_classrepr->n_attributes;
    }
  else
    {
      srch_num_attrs = attr_info->read_classrepr->n_attributes;
    }

  /* shared and class attributes must always use the latest representation */
  srch_num_shared = attr_info->last_classrepr->n_shared_attrs;
  srch_num_class = attr_info->last_classrepr->n_class_attrs;

  for (num_found_attrs = 0, curr_attr = 0; curr_attr < attr_info->num_values; curr_attr++)
    {
      /*
       * Go over the list of attributes (instance, shared, and class attrs)
       * until the desired attribute is found
       */
      isattr_found = false;
      if (islast_reset == true)
	{
	  search_attrepr = attr_info->last_classrepr->attributes;
	}
      else
	{
	  search_attrepr = attr_info->read_classrepr->attributes;
	}

      value = &attr_info->values[curr_attr];

      if (value->attrid == -1)
	{
	  /* Case that we want all attributes */
	  value->attrid = search_attrepr[curr_attr].id;
	}

      /* Search 1: instance attributes of the chosen representation. */
      for (i = 0; isattr_found == false && i < srch_num_attrs; i++, search_attrepr++)
	{
	  /*
	   * Is this a desired instance attribute?
	   */
	  if (value->attrid == search_attrepr->id)
	    {
	      /*
	       * Found it.
	       * Initialize the attribute value information
	       */
	      isattr_found = true;
	      value->attr_type = HEAP_INSTANCE_ATTR;
	      if (islast_reset == true)
		{
		  value->last_attrepr = search_attrepr;
		  /*
		   * The server does not work with DB_TYPE_OBJECT but DB_TYPE_OID
		   */
		  if (value->last_attrepr->type == DB_TYPE_OBJECT)
		    {
		      value->last_attrepr->type = DB_TYPE_OID;
		    }

		  if (value->state == HEAP_UNINIT_ATTRVALUE)
		    {
		      db_value_domain_init (&value->dbvalue, value->last_attrepr->type,
					    value->last_attrepr->domain->precision, value->last_attrepr->domain->scale);
		    }
		}
	      else
		{
		  value->read_attrepr = search_attrepr;
		  /*
		   * The server does not work with DB_TYPE_OBJECT but DB_TYPE_OID
		   */
		  if (value->read_attrepr->type == DB_TYPE_OBJECT)
		    {
		      value->read_attrepr->type = DB_TYPE_OID;
		    }
		}

	      num_found_attrs++;
	    }
	}

      /*
       * if the desired attribute was not found in the instance attributes,
       * look for it in the shared attributes. We always use the last_repr
       * for shared attributes.
       */

      /* Search 2: shared attributes (always from the last representation). */
      for (i = 0, search_attrepr = attr_info->last_classrepr->shared_attrs;
	   isattr_found == false && i < srch_num_shared; i++, search_attrepr++)
	{
	  /*
	   * Is this a desired shared attribute?
	   */
	  if (value->attrid == search_attrepr->id)
	    {
	      /*
	       * Found it.
	       * Initialize the attribute value information
	       */
	      isattr_found = true;
	      value->attr_type = HEAP_SHARED_ATTR;
	      value->last_attrepr = search_attrepr;
	      /*
	       * The server does not work with DB_TYPE_OBJECT but DB_TYPE_OID
	       */
	      if (value->last_attrepr->type == DB_TYPE_OBJECT)
		{
		  value->last_attrepr->type = DB_TYPE_OID;
		}

	      if (value->state == HEAP_UNINIT_ATTRVALUE)
		{
		  db_value_domain_init (&value->dbvalue, value->last_attrepr->type,
					value->last_attrepr->domain->precision, value->last_attrepr->domain->scale);
		}
	      num_found_attrs++;
	    }
	}

      /*
       * if the desired attribute was not found in the instance/shared attrs,
       * look for it in the class attributes. We always use the last_repr
       * for class attributes.
       */

      /* Search 3: class attributes (always from the last representation). */
      for (i = 0, search_attrepr = attr_info->last_classrepr->class_attrs; isattr_found == false && i < srch_num_class;
	   i++, search_attrepr++)
	{
	  /*
	   * Is this a desired class attribute?
	   */

	  if (value->attrid == search_attrepr->id)
	    {
	      /*
	       * Found it.
	       * Initialize the attribute value information
	       */
	      isattr_found = true;
	      value->attr_type = HEAP_CLASS_ATTR;
	      if (islast_reset == true)
		{
		  value->last_attrepr = search_attrepr;
		}
	      else
		{
		  value->read_attrepr = search_attrepr;
		}
	      /*
	       * The server does not work with DB_TYPE_OBJECT but DB_TYPE_OID
	       */
	      if (value->last_attrepr->type == DB_TYPE_OBJECT)
		{
		  value->last_attrepr->type = DB_TYPE_OID;
		}

	      if (value->state == HEAP_UNINIT_ATTRVALUE)
		{
		  db_value_domain_init (&value->dbvalue, value->last_attrepr->type,
					value->last_attrepr->domain->precision, value->last_attrepr->domain->scale);
		}
	      num_found_attrs++;
	    }
	}
    }

  /* For the last representation every requested attribute must exist;
   * anything missing is a fatal inconsistency. */
  if (num_found_attrs != attr_info->num_values && islast_reset == true)
    {
      ret = ER_HEAP_UNKNOWN_ATTRS;
      er_set (ER_FATAL_ERROR_SEVERITY, ARG_FILE_LINE, ret, 1, attr_info->num_values - num_found_attrs);
      goto exit_on_error;
    }

  return ret;

exit_on_error:

  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
}
9766 
9767 /*
9768  * heap_attrinfo_recache () - Recache attribute information for given attrinfo
9769  * return: NO_ERROR
9770  * reprid(in): Cache this class representation
9771  * attr_info(in/out): The attribute information structure
9772  *
9773  * Note: Recache the attribute information for given representation
9774  * identifier of the class in attr_info. That is, set each
9775  * attribute information to point to disk related attribute
9776  * information for given representation identifier.
9777  */
static int
heap_attrinfo_recache (THREAD_ENTRY * thread_p, REPR_ID reprid, HEAP_CACHE_ATTRINFO * attr_info)
{
  HEAP_ATTRVALUE *value;	/* Disk value Attr info for a particular attr */
  int i;
  int ret = NO_ERROR;

  /*
   * If we do not need to cache anything (case of only clear values and
   * disk repr structure).. return
   */

  if (attr_info->read_classrepr != NULL)
    {
      if (attr_info->read_classrepr->id == reprid)
	{
	  /* already caching the requested representation */
	  return NO_ERROR;
	}

      /*
       * Do we need to free the current cached disk representation ?
       */
      if (attr_info->read_classrepr != attr_info->last_classrepr)
	{
	  /* NOTE(review): the call that frees the cached class
	   * representation is elided in this excerpt. */
	}
      attr_info->read_classrepr = NULL;
    }

  if (reprid == NULL_REPRID)
    {
      /* caller only wanted the old cache released */
      return NO_ERROR;
    }

  if (reprid == attr_info->last_classrepr->id)
    {
      /*
       * Take a short cut
       */
      if (attr_info->values != NULL)
	{
	  for (i = 0; i < attr_info->num_values; i++)
	    {
	      value = &attr_info->values[i];
	      value->read_attrepr = value->last_attrepr;
	    }
	}
      attr_info->read_classrepr = attr_info->last_classrepr;
      attr_info->read_cacheindex = -1;	/* Don't need to free this one */
      return NO_ERROR;
    }

  /*
   * Cache the desired class representation information
   */
  if (attr_info->values != NULL)
    {
      for (i = 0; i < attr_info->num_values; i++)
	{
	  value = &attr_info->values[i];
	  value->read_attrepr = NULL;
	}
    }
  attr_info->read_classrepr =
    heap_classrepr_get (thread_p, &attr_info->class_oid, NULL, reprid, &attr_info->read_cacheindex);
  if (attr_info->read_classrepr == NULL)
    {
      goto exit_on_error;
    }

  if (heap_attrinfo_recache_attrepr (attr_info, false) != NO_ERROR)
    {
      /* NOTE(review): the cleanup call in this branch is elided in this
       * excerpt. */

      goto exit_on_error;
    }

  return ret;

exit_on_error:

  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
}
9861 
9862 /*
9863  * heap_attrinfo_end () - Done with attribute information structure
9864  * return: void
9865  * attr_info(in/out): The attribute information structure
9866  *
9867  * Note: Release any memory allocated for attribute information related
9868  * reading of instances.
9869  */
9870 void
9872 {
9873  int ret = NO_ERROR;
9874 
9875  /* check to make sure the attr_info has been used */
9876  if (attr_info->num_values == -1)
9877  {
9878  return;
9879  }
9880 
9881  /*
9882  * Free any attribute and class representation information
9883  */
9884  ret = heap_attrinfo_clear_dbvalues (attr_info);
9885  ret = heap_attrinfo_recache (thread_p, NULL_REPRID, attr_info);
9886 
9887  if (attr_info->last_classrepr != NULL)
9888  {
9890  }
9891 
9892  if (attr_info->values)
9893  {
9894  db_private_free_and_init (thread_p, attr_info->values);
9895  }
9896  OID_SET_NULL (&attr_info->class_oid);
9897 
9898  /*
9899  * Bash this so that we ensure that heap_attrinfo_end is idempotent.
9900  */
9901  attr_info->num_values = -1;
9902 
9903 }
9904 
9905 /*
9906  * heap_attrinfo_clear_dbvalues () - Clear current dbvalues of attribute
9907  * information
9908  * return: NO_ERROR
9909  * attr_info(in/out): The attribute information structure
9910  *
9911  * Note: Clear any current dbvalues associated with attribute information.
9912  */
9913 int
9915 {
9916  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
9917  OR_ATTRIBUTE *attrepr; /* Which one current repr of default one */
9918  int i;
9919  int ret = NO_ERROR;
9920 
9921  /* check to make sure the attr_info has been used */
9922  if (attr_info->num_values == -1)
9923  {
9924  return NO_ERROR;
9925  }
9926 
9927  if (attr_info->values != NULL)
9928  {
9929  for (i = 0; i < attr_info->num_values; i++)
9930  {
9931  value = &attr_info->values[i];
9932  if (value->state != HEAP_UNINIT_ATTRVALUE)
9933  {
9934  /*
9935  * Was the value set up from a default value or from a representation
9936  * of the object
9937  */
9938  attrepr = ((value->read_attrepr != NULL) ? value->read_attrepr : value->last_attrepr);
9939  if (attrepr != NULL)
9940  {
9941  if (pr_clear_value (&value->dbvalue) != NO_ERROR)
9942  {
9943  ret = ER_FAILED;
9944  }
9945  value->state = HEAP_UNINIT_ATTRVALUE;
9946  }
9947  }
9948  }
9949  }
9950  OID_SET_NULL (&attr_info->inst_oid);
9951  attr_info->inst_chn = NULL_CHN;
9952 
9953  return ret;
9954 }
9955 
9956 /*
9957  * heap_attrvalue_read () - Read attribute information of given attribute cache
9958  * and instance
9959  * return: NO_ERROR
9960  * recdes(in): Instance record descriptor
9961  * value(in): Disk value attribute information
9962  * attr_info(in/out): The attribute information structure
9963  *
9964  * Note: Read the dbvalue of the given value attribute information.
9965  */
static int
heap_attrvalue_read (RECDES * recdes, HEAP_ATTRVALUE * value, HEAP_CACHE_ATTRINFO * attr_info)
{
  OR_BUF buf;
  PR_TYPE *pr_type;		/* Primitive type array function structure */
  /* volatile: these locals are read after _setjmp () returns a second time
   * (via the reader's longjmp on error), so they must not be cached in
   * registers across that jump */
  OR_ATTRIBUTE *volatile attrepr;
  char *disk_data = NULL;
  int disk_bound = false;
  volatile int disk_length = -1;
  int ret = NO_ERROR;

  /* Initialize disk value information */
  disk_data = NULL;
  disk_bound = false;
  disk_length = -1;

  /*
   * Does attribute exist in this disk representation?
   */

  if (recdes == NULL || recdes->data == NULL || value->read_attrepr == NULL || value->attr_type == HEAP_SHARED_ATTR
      || value->attr_type == HEAP_CLASS_ATTR)
    {
      /*
       * Either the attribute is a shared or class attr, or the attribute
       * does not exist in this disk representation, or we do not have
       * the disk object (recdes), get default value if any...
       */
      attrepr = value->last_attrepr;
      disk_length = value->last_attrepr->default_value.val_length;
      if (disk_length > 0)
	{
	  disk_data = (char *) value->last_attrepr->default_value.value;
	  disk_bound = true;
	}
    }
  else
    {
      attrepr = value->read_attrepr;
      /* Is it a fixed size attribute ? */
      if (value->read_attrepr->is_fixed != 0)
	{
	  /*
	   * A fixed attribute.
	   */
	  attr_info->read_classrepr->fixed_length, value->read_attrepr->position))
	    {
	      /*
	       * The fixed attribute is bound. Access its information
	       */
	      disk_data =
		((char *) recdes->data
		 attr_info->read_classrepr->n_variable)
		 + value->read_attrepr->location);
	      disk_length = tp_domain_disk_size (value->read_attrepr->domain);
	      disk_bound = true;
	    }
	}
      else
	{
	  /*
	   * A variable attribute
	   */
	  if (!OR_VAR_IS_NULL (recdes->data, value->read_attrepr->location))
	    {
	      /*
	       * The variable attribute is bound.
	       * Find its location through the variable offset attribute table.
	       */
	      disk_data = ((char *) recdes->data + OR_VAR_OFFSET (recdes->data, value->read_attrepr->location));

	      disk_bound = true;
	      /* Only LOB and collection types need the explicit disk length;
	       * the other type readers determine the length themselves. */
	      switch (TP_DOMAIN_TYPE (attrepr->domain))
		{
		case DB_TYPE_BLOB:
		case DB_TYPE_CLOB:
		case DB_TYPE_SET:	/* it may be just a little bit fast */
		case DB_TYPE_MULTISET:
		case DB_TYPE_SEQUENCE:
		  OR_VAR_LENGTH (disk_length, recdes->data, value->read_attrepr->location,
				 attr_info->read_classrepr->n_variable);
		  break;
		default:
		  disk_length = -1;	/* remains can read without disk_length */
		}
	    }
	}
    }

  /*
   * From now on, I should only use attrepr.. it will point to either
   * a current value or a default one
   */

  /*
   * Clear/decache any old value
   */
  if (value->state != HEAP_UNINIT_ATTRVALUE)
    {
      (void) pr_clear_value (&value->dbvalue);
    }


  /*
   * Now make the dbvalue according to the disk data value
   */

  if (disk_data == NULL || disk_bound == false)
    {
      /* Unbound attribute, set it to null value */
      ret = db_value_domain_init (&value->dbvalue, attrepr->type, attrepr->domain->precision, attrepr->domain->scale);
      if (ret != NO_ERROR)
	{
	  goto exit_on_error;
	}
      value->state = HEAP_READ_ATTRVALUE;
    }
  else
    {
      /*
       * Read the value according to disk information that was found
       */
      OR_BUF_INIT2 (buf, disk_data, disk_length);
      /* error_abort = 1: the type readers longjmp back to the _setjmp below
       * instead of returning an error code */
      buf.error_abort = 1;

      switch (_setjmp (buf.env))
	{
	case 0:
	  /* Do not copy the string--just use the pointer. The pr_ routines for strings and sets have different
	   * semantics for length. A negative length value for strings means "don't copy the string, just use the
	   * pointer". For sets, don't translate the set into memory representation at this time. It will only be
	   * translated when needed. */
	  pr_type = PR_TYPE_FROM_ID (attrepr->type);
	  if (pr_type)
	    {
	      (*(pr_type->data_readval)) (&buf, &value->dbvalue, attrepr->domain, disk_length, false, NULL, 0);
	    }
	  value->state = HEAP_READ_ATTRVALUE;
	  break;
	default:
	  /*
	   * An error was found during the reading of the attribute value
	   */
	  (void) db_value_domain_init (&value->dbvalue, attrepr->type, attrepr->domain->precision,
				       attrepr->domain->scale);
	  value->state = HEAP_UNINIT_ATTRVALUE;
	  ret = ER_FAILED;
	  break;
	}
    }

  return ret;

exit_on_error:

  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
}
10125 
10126 /*
10127  * heap_midxkey_get_value () -
10128  * return:
10129  * recdes(in):
10130  * att(in):
10131  * value(out):
10132  * attr_info(in):
10133  */
static int
heap_midxkey_get_value (RECDES * recdes, OR_ATTRIBUTE * att, DB_VALUE * value, HEAP_CACHE_ATTRINFO * attr_info)
{
  char *disk_data = NULL;
  bool found = true;		/* Does attribute(att) exist in this disk representation? */
  int i;

  /* Initialize disk value information */
  disk_data = NULL;
  db_make_null (value);

  if (recdes != NULL && recdes->data != NULL && att != NULL)
    {
      /* The record may have been written with an older representation; if so,
       * locate the attribute with the same id in that representation. */
      if (or_rep_id (recdes) != attr_info->last_classrepr->id)
	{
	  found = false;
	  for (i = 0; i < attr_info->read_classrepr->n_attributes; i++)
	    {
	      if (attr_info->read_classrepr->attributes[i].id == att->id)
		{
		  att = &attr_info->read_classrepr->attributes[i];
		  found = true;
		  break;
		}
	    }
	}

      if (found == false)
	{
	  /* It means that the representation has an attribute which was created after insertion of the record. In this
	   * case, return the default value of the attribute if it exists. */
	  if (att->default_value.val_length > 0)
	    {
	      disk_data = (char *) att->default_value.value;
	    }
	}
      else
	{
	  /* Is it a fixed size attribute ? */
	  if (att->is_fixed != 0)
	    {			/* A fixed attribute. */
	      attr_info->read_classrepr->fixed_length, att->position))
		{
		  /* The fixed attribute is bound. Access its information */
		  disk_data =
		    ((char *) recdes->data +
		     attr_info->read_classrepr->n_variable) + att->location);
		}
	    }
	  else
	    {			/* A variable attribute */
	      if (!OR_VAR_IS_NULL (recdes->data, att->location))
		{
		  /* The variable attribute is bound. Find its location through the variable offset attribute table. */
		  disk_data = ((char *) recdes->data + OR_VAR_OFFSET (recdes->data, att->location));
		}
	    }
	}
    }
  else
    {
      /* callers must supply a record and an attribute descriptor */
      assert (0);
      return ER_FAILED;
    }

  if (disk_data != NULL)
    {
      OR_BUF buf;

      /* length -1: let the type's reader determine the length from the data */
      or_init (&buf, disk_data, -1);
      (*(att->domain->type->data_readval)) (&buf, value, att->domain, -1, false, NULL, 0);
    }

  /* An unbound attribute simply leaves *value as NULL (set above). */
  return NO_ERROR;
}
10211 
10212 /*
10213  * heap_attrinfo_read_dbvalues () - Find db_values of desired attributes of given
10214  * instance
10215  * return: NO_ERROR
10216  * inst_oid(in): The instance oid
10217  * recdes(in): The instance Record descriptor
10218  * attr_info(in/out): The attribute information structure which describe the
10219  * desired attributes
10220  *
10221  * Note: Find DB_VALUES of desired attributes of given instance.
10222  * The attr_info structure must have already been initialized
10223  * with the desired attributes.
10224  *
10225  * If the inst_oid and the recdes are NULL, then we must be
10226  * reading only shared and/or class attributes which are found
10227  * in the last representation.
10228  */
10229 int
10230 heap_attrinfo_read_dbvalues (THREAD_ENTRY * thread_p, const OID * inst_oid, RECDES * recdes,
10231  HEAP_SCANCACHE * scan_cache, HEAP_CACHE_ATTRINFO * attr_info)
10232 {
10233  int i;
10234  REPR_ID reprid; /* The disk representation of the object */
10235  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
10236  int ret = NO_ERROR;
10237 
10238  /* check to make sure the attr_info has been used */
10239  if (attr_info->num_values == -1)
10240  {
10241  return NO_ERROR;
10242  }
10243 
10244  /*
10245  * Make sure that we have the needed cached representation.
10246  */
10247 
10248  if (inst_oid != NULL && recdes != NULL && recdes->data != NULL)
10249  {
10250  reprid = or_rep_id (recdes);
10251 
10252  if (attr_info->read_classrepr == NULL || attr_info->read_classrepr->id != reprid)
10253  {
10254  /* Get the needed representation */
10255  ret = heap_attrinfo_recache (thread_p, reprid, attr_info);
10256  if (ret != NO_ERROR)
10257  {
10258  goto exit_on_error;
10259  }
10260  }
10261  }
10262 
10263  /*
10264  * Go over each attribute and read it
10265  */
10266 
10267  for (i = 0; i < attr_info->num_values; i++)
10268  {
10269  value = &attr_info->values[i];
10270  ret = heap_attrvalue_read (recdes, value, attr_info);
10271  if (ret != NO_ERROR)
10272  {
10273  goto exit_on_error;
10274  }
10275  }
10276 
10277  /*
10278  * Cache the information of the instance
10279  */
10280  if (inst_oid != NULL && recdes != NULL && recdes->data != NULL)
10281  {
10282  attr_info->inst_chn = or_chn (recdes);
10283  attr_info->inst_oid = *inst_oid;
10284  }
10285 
10286  return ret;
10287 
10288 exit_on_error:
10289 
10290  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
10291 }
10292 
/*
 * NOTE(review): the signature line is missing from this view.  Judging by the
 * body, this is the variant of heap_attrinfo_read_dbvalues that takes only a
 * record descriptor and does not cache the instance OID/CHN -- presumably
 * heap_attrinfo_read_dbvalues_without_oid; confirm against the repository.
 */
int
{
  int i;
  REPR_ID reprid;		/* The disk representation of the object */
  HEAP_ATTRVALUE *value;	/* Disk value Attr info for a particular attr */
  int ret = NO_ERROR;

  /* check to make sure the attr_info has been used */
  if (attr_info->num_values == -1)
    {
      return NO_ERROR;
    }

  /*
   * Make sure that we have the needed cached representation.
   */

  if (recdes != NULL)
    {
      reprid = or_rep_id (recdes);

      if (attr_info->read_classrepr == NULL || attr_info->read_classrepr->id != reprid)
	{
	  /* Get the needed representation */
	  ret = heap_attrinfo_recache (thread_p, reprid, attr_info);
	  if (ret != NO_ERROR)
	    {
	      goto exit_on_error;
	    }
	}
    }

  /*
   * Go over each attribute and read it
   */

  for (i = 0; i < attr_info->num_values; i++)
    {
      value = &attr_info->values[i];
      ret = heap_attrvalue_read (recdes, value, attr_info);
      if (ret != NO_ERROR)
	{
	  goto exit_on_error;
	}
    }

  return ret;

exit_on_error:

  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
}
10346 
10347 
10348 /*
10349  * heap_attrinfo_delete_lob ()
10350  * return: NO_ERROR
10351  * thread_p(in):
10352  * recdes(in): The instance Record descriptor
10353  * attr_info(in): The attribute information structure which describe the
10354  * desired attributes
10355  *
10356  */
/*
 * NOTE(review): the signature line is missing from this view; per the header
 * comment above, this is heap_attrinfo_delete_lob (thread_p, recdes,
 * attr_info) -- confirm against the repository.
 */
int
{
  int i;
  HEAP_ATTRVALUE *value;
  int ret = NO_ERROR;

  assert (attr_info != NULL);
  assert (attr_info->num_values > 0);

  /*
   * Make sure that we have the needed cached representation.
   */

  if (recdes != NULL)
    {
      REPR_ID reprid;
      reprid = or_rep_id (recdes);
      if (attr_info->read_classrepr == NULL || attr_info->read_classrepr->id != reprid)
	{
	  /* Get the needed representation */
	  ret = heap_attrinfo_recache (thread_p, reprid, attr_info);
	  if (ret != NO_ERROR)
	    {
	      goto exit_on_error;
	    }
	}
    }

  /*
   * Go over each attribute and delete the data if it's lob type
   */

  for (i = 0; i < attr_info->num_values; i++)
    {
      value = &attr_info->values[i];
      if (value->last_attrepr->type == DB_TYPE_BLOB || value->last_attrepr->type == DB_TYPE_CLOB)
	{
	  /* Lazily read the value from the record if it was not read yet. */
	  if (value->state == HEAP_UNINIT_ATTRVALUE && recdes != NULL)
	    {
	      ret = heap_attrvalue_read (recdes, value, attr_info);
	      if (ret != NO_ERROR)
		{
		  goto exit_on_error;
		}
	    }
	  if (!db_value_is_null (&value->dbvalue))
	    {
	      DB_ELO *elo;
	      || db_value_type (&value->dbvalue) == DB_TYPE_CLOB);
	      elo = db_get_elo (&value->dbvalue);
	      if (elo)
		{
		  /* delete the external LOB data referenced by this value */
		  ret = db_elo_delete (elo);
		}
	      value->state = HEAP_WRITTEN_ATTRVALUE;
	    }
	}
    }

  return ret;

exit_on_error:

  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
}
10424 
10425 /*
10426  * heap_attrinfo_dump () - Dump value of attribute information
10427  * return:
10428  * attr_info(in): The attribute information structure
10429  * dump_schema(in):
10430  *
10431  * Note: Dump attribute value of given attribute information.
10432  */
10433 void
10434 heap_attrinfo_dump (THREAD_ENTRY * thread_p, FILE * fp, HEAP_CACHE_ATTRINFO * attr_info, bool dump_schema)
10435 {
10436  int i;
10437  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
10438  int ret = NO_ERROR;
10439 
10440  /* check to make sure the attr_info has been used */
10441  if (attr_info->num_values == -1)
10442  {
10443  fprintf (fp, " Empty attrinfo\n");
10444  return;
10445  }
10446 
10447  /*
10448  * Dump attribute schema information
10449  */
10450 
10451  if (dump_schema == true)
10452  {
10453  ret = heap_classrepr_dump (thread_p, fp, &attr_info->class_oid, attr_info->read_classrepr);
10454  }
10455 
10456  for (i = 0; i < attr_info->num_values; i++)
10457  {
10458  value = &attr_info->values[i];
10459  fprintf (fp, " Attrid = %d, state = %d, type = %s\n", value->attrid, value->state,
10460  pr_type_name (value->read_attrepr->type));
10461  /*
10462  * Dump the value in memory format
10463  */
10464 
10465  fprintf (fp, " Memory_value_format:\n");
10466  fprintf (fp, " value = ");
10467  db_value_fprint (fp, &value->dbvalue);
10468  fprintf (fp, "\n\n");
10469  }
10470 
10471 }
10472 
10473 /*
10474  * heap_attrvalue_locate () - Locate disk attribute value information
10475  * return: attrvalue or NULL
10476  * attrid(in): The desired attribute identifier
10477  * attr_info(in/out): The attribute information structure which describe the
10478  * desired attributes
10479  *
10480  * Note: Locate the disk attribute value information of an attribute
10481  * information structure which have been already initialized.
10482  */
{
  HEAP_ATTRVALUE *value;	/* Disk value Attr info for a particular attr */
  int i;

  /* Linear search over the cached values for a matching attribute id. */
  for (i = 0, value = attr_info->values; i < attr_info->num_values; i++, value++)
    {
      if (attrid == value->attrid)
	{
	  return value;
	}
    }

  /* attrid is not part of this attribute cache */
  return NULL;
}
10499 
10500 /*
10501  * heap_locate_attribute () -
10502  * return:
10503  * attrid(in):
10504  * attr_info(in):
10505  */
10506 static OR_ATTRIBUTE *
10507 heap_locate_attribute (ATTR_ID attrid, HEAP_CACHE_ATTRINFO * attr_info)
10508 {
10509  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
10510  int i;
10511 
10512  for (i = 0, value = attr_info->values; i < attr_info->num_values; i++, value++)
10513  {
10514  if (attrid == value->attrid)
10515  {
10516  /* Some altered attributes might have only the last representations of them. */
10517  return (value->read_attrepr != NULL) ? value->read_attrepr : value->last_attrepr;
10518  }
10519  }
10520 
10521  return NULL;
10522 }
10523 
10524 /*
10525  * heap_locate_last_attrepr () -
10526  * return:
10527  * attrid(in):
10528  * attr_info(in):
10529  */
OR_ATTRIBUTE *
{
  HEAP_ATTRVALUE *value;	/* Disk value Attr info for a particular attr */
  int i;

  /* Return the last (most recent) representation of the attribute with the
   * given id, or NULL if the id is not part of this attribute cache. */
  for (i = 0, value = attr_info->values; i < attr_info->num_values; i++, value++)
    {
      if (attrid == value->attrid)
	{
	  return value->last_attrepr;
	}
    }

  return NULL;
}
10546 
10547 /*
10548  * heap_attrinfo_access () - Access an attribute value which has been already read
10549  * return:
10550  * attrid(in): The desired attribute identifier
10551  * attr_info(in/out): The attribute information structure which describe the
10552  * desired attributes
10553  *
10554  * Note: Find DB_VALUE of desired attribute identifier.
10555  * The dbvalue attributes must have been read by now using the
10556  * function heap_attrinfo_read_dbvalues ()
10557  */
DB_VALUE *
{
  HEAP_ATTRVALUE *value;	/* Disk value Attr info for a particular attr */

  /* check to make sure the attr_info has been used */
  if (attr_info->num_values == -1)
    {
      return NULL;
    }

  value = heap_attrvalue_locate (attrid, attr_info);
  if (value == NULL || value->state == HEAP_UNINIT_ATTRVALUE)
    {
      /* either an unknown attrid, or the value has not been read yet via
       * heap_attrinfo_read_dbvalues () */
      er_log_debug (ARG_FILE_LINE, "heap_attrinfo_access: Unknown attrid = %d", attrid);
      return NULL;
    }

  return &value->dbvalue;
}
10579 
10580 /*
10581  * heap_get_class_subclasses () - get OIDs of subclasses for a given class
10582  * return : error code or NO_ERROR
10583  * thread_p (in) :
10584  * class_oid (in) : OID of the parent class
10585  * count (out) : size of the subclasses array
10586  * subclasses (out) : array containing OIDs of subclasses
10587  *
10588  * Note: The subclasses array is maintained as an array of OID's,
10589  * the last element in the array will satisfy the OID_ISNULL() test.
10590  * The array_size has the number of actual elements allocated in the
10591  * array which may be more than the number of slots that have non-NULL
10592  * OIDs. The function adds the subclass oids to the existing array.
10593  * If the array is not large enough, it is reallocated using realloc.
10594  */
10595 int
10596 heap_get_class_subclasses (THREAD_ENTRY * thread_p, const OID * class_oid, int *count, OID ** subclasses)
10597 {
10598  HEAP_SCANCACHE scan_cache;
10599  RECDES recdes;
10600  int error = NO_ERROR;
10601 
10602  error = heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);
10603  if (error != NO_ERROR)
10604  {
10605  return error;
10606  }
10607 
10608  if (heap_get_class_record (thread_p, class_oid, &recdes, &scan_cache, PEEK) != S_SUCCESS)
10609  {
10610  heap_scancache_end (thread_p, &scan_cache);
10611  return ER_FAILED;
10612  }
10613 
10614  error = orc_subclasses_from_record (&recdes, count, subclasses);
10615 
10616  heap_scancache_end (thread_p, &scan_cache);
10617 
10618  return error;
10619 }
10620 
10621 /*
10622  * heap_class_get_partition_info () - Get partition information for the class
10623  * identified by class_oid
10624  * return : error code or NO_ERROR
10625  * class_oid (in) : class_oid
10626  * partition_info (in/out) : partition information
10627  * class_hfid (in/out) : HFID of the partitioned class
10628  * repr_id (in/out) : class representation id
10629  * has_partition_info (out):
10630  *
10631  * Note: This function extracts the partition information from a class OID.
10632  */
10633 static int
10634 heap_class_get_partition_info (THREAD_ENTRY * thread_p, const OID * class_oid, OR_PARTITION * partition_info,
10635  HFID * class_hfid, REPR_ID * repr_id, int *has_partition_info)
10636 {
10637  int error = NO_ERROR;
10638  RECDES recdes;
10639  HEAP_SCANCACHE scan_cache;
10640 
10641  assert (class_oid != NULL);
10642 
10643  if (heap_scancache_quick_start_root_hfid (thread_p, &scan_cache) != NO_ERROR)
10644  {
10645  return ER_FAILED;
10646  }
10647 
10648  scan_cache.mvcc_snapshot = logtb_get_mvcc_snapshot (thread_p);
10649  if (scan_cache.mvcc_snapshot == NULL)
10650  {
10651  error = er_errid ();
10652  return (error == NO_ERROR ? ER_FAILED : error);
10653  }
10654 
10655  if (heap_get_class_record (thread_p, class_oid, &recdes, &scan_cache, PEEK) != S_SUCCESS)
10656  {
10657  error = ER_FAILED;
10658  goto cleanup;
10659  }
10660 
10661  error = or_class_get_partition_info (&recdes, partition_info, repr_id, has_partition_info);
10662  if (error != NO_ERROR)
10663  {
10664  goto cleanup;
10665  }
10666 
10667  if (class_hfid != NULL)
10668  {
10669  or_class_hfid (&recdes, class_hfid);
10670  }
10671 
10672 cleanup:
10673  heap_scancache_end (thread_p, &scan_cache);
10674 
10675  return error;
10676 }
10677 
10678 /*
10679  * heap_get_partition_attributes () - get attribute ids for columns of
10680  * _db_partition class
10681  * return : error code or NO_ERROR
10682  * thread_p (in) :
10683  * cls_oid (in) : _db_partition class OID
10684  * type_id (in/out) : holder for the type attribute id
10685  * values_id (in/out) : holder for the values attribute id
10686  */
static int
heap_get_partition_attributes (THREAD_ENTRY * thread_p, const OID * cls_oid, ATTR_ID * type_id, ATTR_ID * values_id)
{
  RECDES recdes;
  HEAP_SCANCACHE scan;
  HEAP_CACHE_ATTRINFO attr_info;
  int error = NO_ERROR;
  int i = 0;
  char *attr_name = NULL;
  bool is_scan_cache_started = false, is_attrinfo_started = false;
  char *string = NULL;
  int alloced_string = 0;


  if (type_id == NULL || values_id == NULL)
    {
      assert (false);
      error = ER_FAILED;
      goto cleanup;
    }
  *type_id = *values_id = NULL_ATTRID;

  if (heap_scancache_quick_start_root_hfid (thread_p, &scan) != NO_ERROR)
    {
      error = ER_FAILED;
      goto cleanup;
    }
  is_scan_cache_started = true;

  /* -1: cache information for all attributes of the class */
  error = heap_attrinfo_start (thread_p, cls_oid, -1, NULL, &attr_info);
  if (error != NO_ERROR)
    {
      goto cleanup;
    }
  is_attrinfo_started = true;

  if (heap_get_class_record (thread_p, cls_oid, &recdes, &scan, PEEK) != S_SUCCESS)
    {
      error = ER_FAILED;
      goto cleanup;
    }

  /* Walk the attribute names until both the "ptype" and "pvalues" ids are found. */
  for (i = 0; i < attr_info.num_values && (*type_id == NULL_ATTRID || *values_id == NULL_ATTRID); i++)
    {
      alloced_string = 0;
      string = NULL;

      error = or_get_attrname (&recdes, i, &string, &alloced_string);
      if (error != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  goto cleanup;
	}

      attr_name = string;
      if (attr_name == NULL)
	{
	  error = ER_FAILED;
	  goto cleanup;
	}
      if (strcmp (attr_name, "ptype") == 0)
	{
	  *type_id = i;
	}

      if (strcmp (attr_name, "pvalues") == 0)
	{
	  *values_id = i;
	}

      /* or_get_attrname may have allocated the name; free it if so */
      if (string != NULL && alloced_string == 1)
	{
	  db_private_free_and_init (thread_p, string);
	}
    }

  /* fail if either column was not found in the class */
  if (*type_id == NULL_ATTRID || *values_id == NULL_ATTRID)
    {
      error = ER_FAILED;
    }

cleanup:
  if (is_attrinfo_started)
    {
      heap_attrinfo_end (thread_p, &attr_info);
    }
  if (is_scan_cache_started)
    {
      heap_scancache_end (thread_p, &scan);
    }
  return error;
}
10780 
10781 /*
10782  * heap_get_partitions_from_subclasses () - Get partition information from a
10783  * list of subclasses
10784  * return : error code or NO_ERROR
10785  * thread_p (in) :
10786  * subclasses (in) : subclasses OIDs
10787  * parts_count (in/out) : number of "useful" elements in parts
10788  * parts (in/out) : partitions
10789  *
10790  * Note: Memory for the partition array must be allocated before calling this
10791  * function and must be enough to store all partitions. The value from
10792  * position 0 in the partitions array will contain information from the
10793  * master class
10794  */
10795 static int
10796 heap_get_partitions_from_subclasses (THREAD_ENTRY * thread_p, const OID * subclasses, int *parts_count,
10797  OR_PARTITION * parts)
10798 {
10799  int part_idx = 0, i;
10800  int error = NO_ERROR;
10801  HFID part_hfid;
10802  REPR_ID repr_id;
10803  int has_partition_info = 0;
10804 
10805  if (parts == NULL)
10806  {
10807  assert (false);
10808  error = ER_FAILED;
10809  goto cleanup;
10810  }
10811 
10812  /* the partition information for the master class will be set by the caller */
10813  part_idx = 1;
10814 
10815  /* loop through subclasses and load partition information if the subclass is a partition */
10816  for (i = 0; !OID_ISNULL (&subclasses[i]); i++)
10817  {
10818  /* Get partition information from this subclass. part_info will be the OID of the tuple from _db_partition
10819  * containing partition information */
10820  error =
10821  heap_class_get_partition_info (thread_p, &subclasses[i], &parts[part_idx], &part_hfid, &repr_id,
10822  &has_partition_info);
10823  if (error != NO_ERROR)
10824  {
10825  goto cleanup;
10826  }
10827 
10828  if (has_partition_info == 0)
10829  {
10830  /* this is not a partition, this is a simple subclass */
10831  continue;
10832  }
10833 
10834  COPY_OID (&(parts[part_idx].class_oid), &subclasses[i]);
10835  HFID_COPY (&(parts[part_idx].class_hfid), &part_hfid);
10836  parts[part_idx].rep_id = repr_id;
10837 
10838  part_idx++;
10839  }
10840  *parts_count = part_idx;
10841 
10842 cleanup:
10843  if (error != NO_ERROR)
10844  {
10845  /* free memory for the values of partitions */
10846  for (i = 1; i < part_idx; i++)
10847  {
10848  if (parts[i].values != NULL)
10849  {
10850  db_seq_free (parts[i].values);
10851  }
10852  }
10853  }
10854  return error;
10855 }
10856 
10857 /*
10858  * heap_get_class_partitions () - get partitions information for a class
10859  * return : error code or NO_ERROR
10860  * thread_p (in) :
10861  * class_oid (in) : class OID
10862  * parts (in/out) : partitions information
10863  * parts_count (in/out) : number of partitions
10864  */
int
heap_get_class_partitions (THREAD_ENTRY * thread_p, const OID * class_oid, OR_PARTITION ** parts, int *parts_count)
{
  int subclasses_count = 0;
  OID *subclasses = NULL;
  OR_PARTITION part_info;
  int error = NO_ERROR;
  OR_PARTITION *partitions = NULL;
  REPR_ID class_repr_id = NULL_REPRID;
  HFID class_hfid;
  int has_partition_info = 0;

  *parts = NULL;
  *parts_count = 0;
  part_info.values = NULL;

  /* This class might have partitions and subclasses. In order to get partition information we have to: 1. Get the OIDs
   * for all subclasses 2. Get partition information for all OIDs 3. Build information only for those subclasses which
   * are partitions */
  error =
    heap_class_get_partition_info (thread_p, class_oid, &part_info, &class_hfid, &class_repr_id, &has_partition_info);
  if (error != NO_ERROR)
    {
      goto cleanup;
    }

  if (has_partition_info == 0)
    {
      /* this class does not have partitions */
      error = NO_ERROR;
      goto cleanup;
    }

  /* Get OIDs for subclasses of class_oid. Some of them will be partitions */
  error = heap_get_class_subclasses (thread_p, class_oid, &subclasses_count, &subclasses);
  if (error != NO_ERROR)
    {
      goto cleanup;
    }
  else if (subclasses_count == 0)
    {
      /* This means that class_oid actually points to a partition and not the master class. We return NO_ERROR here
       * since there's no partition information */
      error = NO_ERROR;
      goto cleanup;
    }

  /* Allocate memory for partitions. We allocate more memory than needed here because the call to
   * heap_get_class_subclasses from above actually returned a larger count than the useful information. Also, not all
   * subclasses are necessarily partitions. */
  partitions = (OR_PARTITION *) db_private_alloc (thread_p, (subclasses_count + 1) * sizeof (OR_PARTITION));
  if (partitions == NULL)
    {
      er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error, 1, (subclasses_count + 1) * sizeof (OR_PARTITION));
      goto cleanup;
    }

  /* fill entries 1..N with information from the partition subclasses */
  error = heap_get_partitions_from_subclasses (thread_p, subclasses, parts_count, partitions);
  if (error != NO_ERROR)
    {
      ASSERT_ERROR ();
      goto cleanup;
    }

  /* fill the information for the root (partitioned class) */
  COPY_OID (&partitions[0].class_oid, class_oid);
  HFID_COPY (&partitions[0].class_hfid, &class_hfid);
  partitions[0].partition_type = part_info.partition_type;
  partitions[0].rep_id = class_repr_id;
  partitions[0].values = NULL;
  if (part_info.values != NULL)
    {
      /* the root keeps its own copy; part_info.values is freed below */
      partitions[0].values = set_copy (part_info.values);
      if (partitions[0].values == NULL)
	{
	  error = er_errid ();
	  goto cleanup;
	}
      set_free (part_info.values);
      part_info.values = NULL;
    }

  *parts = partitions;

cleanup:
  if (subclasses != NULL)
    {
      free_and_init (subclasses);
    }
  if (part_info.values != NULL)
    {
      set_free (part_info.values);
    }
  /* on error, release the partially-built partitions array */
  if (error != NO_ERROR && partitions != NULL)
    {
      db_private_free (thread_p, partitions);
      *parts = NULL;
      *parts_count = 0;
    }
  return error;
}
10967 
10968 /*
10969  * heap_clear_partition_info () - free partitions info from heap_get_class_partitions
10970  * return : void
10971  * thread_p (in) :
10972  * parts (in) : partitions information
10973  * parts_count (in) : number of partitions
10974  */
10975 void
10976 heap_clear_partition_info (THREAD_ENTRY * thread_p, OR_PARTITION * parts, int parts_count)
10977 {
10978  if (parts != NULL)
10979  {
10980  int i;
10981 
10982  for (i = 0; i < parts_count; i++)
10983  {
10984  if (parts[i].values != NULL)
10985  {
10986  db_seq_free (parts[i].values);
10987  }
10988  }
10989 
10990  db_private_free (thread_p, parts);
10991  }
10992 }
10993 
10994 /*
10995  * heap_get_class_supers () - get OIDs of superclasses of a class
10996  * return : error code or NO_ERROR
10997  * thread_p (in) : thread entry
10998  * class_oid (in) : OID of the subclass
10999  * super_oids (in/out) : OIDs of the superclasses
11000  * count (in/out) : number of elements in super_oids
11001  */
11002 int
11003 heap_get_class_supers (THREAD_ENTRY * thread_p, const OID * class_oid, OID ** super_oids, int *count)
11004 {
11005  HEAP_SCANCACHE scan_cache;
11006  RECDES recdes;
11007  int error = NO_ERROR;
11008 
11009  error = heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);
11010  if (error != NO_ERROR)
11011  {
11012  return error;
11013  }
11014 
11015  if (heap_get_class_record (thread_p, class_oid, &recdes, &scan_cache, PEEK) != S_SUCCESS)
11016  {
11017  heap_scancache_end (thread_p, &scan_cache);
11018  return ER_FAILED;
11019  }
11020 
11021  error = orc_superclasses_from_record (&recdes, count, super_oids);
11022 
11023  heap_scancache_end (thread_p, &scan_cache);
11024 
11025  return error;
11026 }
11027 
11028 /*
11029  * heap_attrinfo_check () -
11030  * return: NO_ERROR
11031  * inst_oid(in): The instance oid
11032  * attr_info(in): The attribute information structure which describe the
11033  * desired attributes
11034  */
static int
heap_attrinfo_check (const OID * inst_oid, HEAP_CACHE_ATTRINFO * attr_info)
{
  int ret = NO_ERROR;

  if (inst_oid != NULL)
    {
      /*
       * The OIDs must be equal
       */
      if (!OID_EQ (&attr_info->inst_oid, inst_oid))
	{
	  /* a non-NULL cached OID that differs from inst_oid means the cache
	   * belongs to a different instance -- fatal inconsistency */
	  if (!OID_ISNULL (&attr_info->inst_oid))
	    {
	      ret = ER_HEAP_WRONG_ATTRINFO;
	      attr_info->inst_oid.pageid, attr_info->inst_oid.slotid, inst_oid->volid, inst_oid->pageid,
	      inst_oid->slotid);
	      goto exit_on_error;
	    }

	  /* cache was unset: adopt the given instance OID */
	  attr_info->inst_oid = *inst_oid;
	}
    }
  else
    {
      /* no instance expected, so the cache must not claim one */
      if (!OID_ISNULL (&attr_info->inst_oid))
	{
	  ret = ER_HEAP_WRONG_ATTRINFO;
	  er_set (ER_FATAL_ERROR_SEVERITY, ARG_FILE_LINE, ret, 6, attr_info->inst_oid.volid, attr_info->inst_oid.pageid,
	  goto exit_on_error;
	}
    }

  return ret;

exit_on_error:

  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
}
11076 
11077 /*
11078  * heap_attrinfo_set () - Set the value of given attribute
11079  * return: NO_ERROR
11080  * inst_oid(in): The instance oid
11081  * attrid(in): The identifier of the attribute to be set
11082  * attr_val(in): The memory value of the attribute
11083  * attr_info(in/out): The attribute information structure which describe the
11084  * desired attributes
11085  *
11086  * Note: Set DB_VALUE of desired attribute identifier.
11087  */
11088 int
11089 heap_attrinfo_set (const OID * inst_oid, ATTR_ID attrid, DB_VALUE * attr_val, HEAP_CACHE_ATTRINFO * attr_info)
11090 {
11091  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
11092  PR_TYPE *pr_type; /* Primitive type array function structure */
11093  TP_DOMAIN_STATUS dom_status;
11094  int ret = NO_ERROR;
11095 
11096  /*
11097  * check to make sure the attr_info has been used, should never be empty.
11098  */
11099 
11100  if (attr_info->num_values == -1)
11101  {
11102  return ER_FAILED;
11103  }
11104 
  /* Verify that inst_oid matches the instance cached in attr_info. */
11105  ret = heap_attrinfo_check (inst_oid, attr_info);
11106  if (ret != NO_ERROR)
11107  {
11108  goto exit_on_error;
11109  }
11110 
  /* Locate the cached value slot for the requested attribute id. */
11111  value = heap_attrvalue_locate (attrid, attr_info);
11112  if (value == NULL)
11113  {
11114  goto exit_on_error;
11115  }
11116 
  /* pr_type is taken from the attribute's last representation (the
   * assignment precedes this check); a NULL type means a broken domain. */
11118  if (pr_type == NULL)
11119  {
11120  goto exit_on_error;
11121  }
11122 
  /* Release any previous value held in the slot before overwriting it. */
11123  ret = pr_clear_value (&value->dbvalue);
11124  if (ret != NO_ERROR)
11125  {
11126  goto exit_on_error;
11127  }
11128 
  /* Re-initialize the slot's DB_VALUE domain (type/precision/scale). */
11129  ret =
11131  value->last_attrepr->domain->scale);
11132  if (ret != NO_ERROR)
11133  {
11134  goto exit_on_error;
11135  }
11136 
11137  /*
11138  * As we use "writeval" to do the writing and that function gets
11139  * enough domain information, we can use non-exact domain matching
11140  * here to defer the coercion until it is written.
11141  */
11142  dom_status = tp_domain_check (value->last_attrepr->domain, attr_val, TP_EXACT_MATCH);
11143  if (dom_status == DOMAIN_COMPATIBLE)
11144  {
11145  /*
11146  * the domains match exactly, set the value and proceed. Copy
11147  * the source only if it's a set-valued thing (that's the purpose
11148  * of the third argument).
11149  */
11150  ret = (*(pr_type->setval)) (&value->dbvalue, attr_val, TP_IS_SET_TYPE (pr_type->id));
11151  }
11152  else
11153  {
11154  /* the domains don't match, must attempt coercion */
11155  dom_status = tp_value_auto_cast (attr_val, &value->dbvalue, value->last_attrepr->domain);
11156  if (dom_status != DOMAIN_COMPATIBLE)
11157  {
  /* Coercion failed: set the error, leave the slot NULL. */
11158  ret = tp_domain_status_er_set (dom_status, ARG_FILE_LINE, attr_val, value->last_attrepr->domain);
11159  assert (er_errid () != NO_ERROR);
11160 
11161  db_make_null (&value->dbvalue);
11162  }
11163  }
11164 
11165  if (ret != NO_ERROR)
11166  {
11167  goto exit_on_error;
11168  }
11169 
  /* Mark the slot as written so transform-to-disk knows it carries a new value. */
11170  value->state = HEAP_WRITTEN_ATTRVALUE;
11171 
11172  return ret;
11173 
11174 exit_on_error:
11175 
  /* Prefer the already-set error code; fall back to er_errid(), then ER_FAILED. */
11176  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
11177 }
11178 
11179 /*
11180  * heap_attrinfo_set_uninitialized () - Read uninitialized attributes
11181  * return: NO_ERROR
11182  * inst_oid(in): The instance oid
11183  * recdes(in): The instance record descriptor
11184  * attr_info(in/out): The attribute information structure which describe the
11185  * desired attributes
11186  *
11187  * Note: Read the db values of the uninitialized attributes from the
11188  * given recdes. This function is used when we are ready to
11189  * transform an object that has been updated/inserted in the server.
11190  * If the object has been updated, recdes must be the old object
11191  * (the one on disk), so we can set the rest of the uninitialized
11192  * attributes from the old object.
11193  * If the object is a new one, recdes should be NULL, since there
11194  * is not an object on disk, the rest of the uninitialized
11195  * attributes are set from default values.
11196  */
11197 static int
11198 heap_attrinfo_set_uninitialized (THREAD_ENTRY * thread_p, OID * inst_oid, RECDES * recdes,
11199  HEAP_CACHE_ATTRINFO * attr_info)
11200 {
11201  int i;
11202  REPR_ID reprid; /* Representation of object */
11203  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
11204  int ret = NO_ERROR;
11205 
  /* Verify that inst_oid matches the instance cached in attr_info. */
11206  ret = heap_attrinfo_check (inst_oid, attr_info);
11207  if (ret != NO_ERROR)
11208  {
11209  goto exit_on_error;
11210  }
11211 
11212  /*
11213  * Make sure that we have the needed cached representation.
11214  */
11215 
  /* With an old record, use its representation id; for a brand-new object
   * fall back to the class's latest representation. */
11216  if (recdes != NULL)
11217  {
11218  reprid = or_rep_id (recdes);
11219  }
11220  else
11221  {
11222  reprid = attr_info->last_classrepr->id;
11223  }
11224 
11225  if (attr_info->read_classrepr == NULL || attr_info->read_classrepr->id != reprid)
11226  {
11227  /* Get the needed representation */
11228  ret = heap_attrinfo_recache (thread_p, reprid, attr_info);
11229  if (ret != NO_ERROR)
11230  {
11231  goto exit_on_error;
11232  }
11233  }
11234 
11235  /*
11236  * Go over the attribute values and set the ones that have not been
11237  * initialized
11238  */
11239  for (i = 0; i < attr_info->num_values; i++)
11240  {
11241  value = &attr_info->values[i];
11242  if (value->state == HEAP_UNINIT_ATTRVALUE)
11243  {
  /* Untouched attribute: read its value from the old record (or the
   * default when recdes is NULL). */
11244  ret = heap_attrvalue_read (recdes, value, attr_info);
11245  if (ret != NO_ERROR)
11246  {
11247  goto exit_on_error;
11248  }
11249  }
11250  else if (value->state == HEAP_WRITTEN_ATTRVALUE
11251  && (value->last_attrepr->type == DB_TYPE_BLOB || value->last_attrepr->type == DB_TYPE_CLOB))
11252  {
  /* LOB attribute that was overwritten: save the new value, fetch the
   * old LOB from the record and delete its external storage, then
   * restore the new value into the slot. */
11253  DB_VALUE *save;
11254  save = db_value_copy (&value->dbvalue);
11255  pr_clear_value (&value->dbvalue);
11256 
11257  /* read and delete old value */
11258  ret = heap_attrvalue_read (recdes, value, attr_info);
11259  if (ret != NO_ERROR)
11260  {
11261  goto exit_on_error;
11262  }
11263  if (!db_value_is_null (&value->dbvalue))
11264  {
11265  DB_ELO *elo;
11266 
11268  || db_value_type (&value->dbvalue) == DB_TYPE_CLOB);
11269  elo = db_get_elo (&value->dbvalue);
11270  if (elo)
11271  {
11272  ret = db_elo_delete (elo);
11273  }
11274  pr_clear_value (&value->dbvalue);
  /* db_elo_delete returns a size/status; non-negative means success. */
11275  ret = (ret >= 0 ? NO_ERROR : ret);
11276  if (ret != NO_ERROR)
11277  {
11278  goto exit_on_error;
11279  }
11280  }
11281  value->state = HEAP_WRITTEN_ATTRVALUE;
11282  pr_clone_value (save, &value->dbvalue);
11283  pr_free_ext_value (save);
11284  }
11285  }
11286 
  /* Remember the cache coherency number of the source record (-1 for new objects). */
11287  if (recdes != NULL)
11288  {
11289  attr_info->inst_chn = or_chn (recdes);
11290  }
11291  else
11292  {
11293  attr_info->inst_chn = -1;
11294  }
11295 
11296  return ret;
11297 
11298 exit_on_error:
11299 
  /* Prefer the already-set error code; fall back to er_errid(), then ER_FAILED. */
11300  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
11301 }
11302 
11303 /*
11304  * heap_attrinfo_get_disksize () - Find the disk size needed to transform the object
11305  * represented by attr_info
11306  * return: size of the object
11307  * attr_info(in/out): The attribute information structure
11308  * is_mvcc_class(in): true, if MVCC class
11309  * offset_size_ptr(out): offset size
11310  *
11311  * Note: Find the disk size needed to transform the object represented
11312  * by the attribute information structure.
11313  */
11314 static int
11315 heap_attrinfo_get_disksize (HEAP_CACHE_ATTRINFO * attr_info, bool is_mvcc_class, int *offset_size_ptr)
11316 {
11317  int i, size;
11318  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
11319 
  /* Start with the smallest (1-byte) variable-offset encoding; widen below
   * if the computed size exceeds what that encoding can address. */
11320  *offset_size_ptr = OR_BYTE_SIZE;
11321 
11322 re_check:
11323  size = 0;
  /* Accumulate the disk size of every attribute (fixed vs. variable
   * attributes are sized differently; the elided branches add each one). */
11324  for (i = 0; i < attr_info->num_values; i++)
11325  {
11326  value = &attr_info->values[i];
11327 
11328  if (value->last_attrepr->is_fixed != 0)
11329  {
11331  }
11332  else
11333  {
11335  }
11336  }
11337 
  /* Add the record header size (MVCC header for MVCC classes). */
11338  if (is_mvcc_class)
11339  {
11341  }
11342  else
11343  {
11345  }
11346 
  /* Add the variable-offset table, whose entry width depends on *offset_size_ptr. */
11347  size += OR_VAR_TABLE_SIZE_INTERNAL (attr_info->last_classrepr->n_variable, *offset_size_ptr);
11349 
  /* If the total no longer fits in the chosen offset width, widen the
   * encoding and recompute, since the table itself grows with the width. */
11350  if (*offset_size_ptr == OR_BYTE_SIZE && size > OR_MAX_BYTE)
11351  {
11352  *offset_size_ptr = OR_SHORT_SIZE; /* 2byte */
11353  goto re_check;
11354  }
11355  if (*offset_size_ptr == OR_SHORT_SIZE && size > OR_MAX_SHORT)
11356  {
11357  *offset_size_ptr = BIG_VAR_OFFSET_SIZE; /* 4byte */
11358  goto re_check;
11359  }
11360 
11361  return size;
11362 }
11363 
11364 /*
11365  * heap_attrinfo_transform_to_disk () - Transform to disk an attribute information
11366  * kind of instance
11367  * return: SCAN_CODE
11368  * (Either of S_SUCCESS, S_DOESNT_FIT,
11369  * S_ERROR)
11370  * attr_info(in/out): The attribute information structure
11371  * old_recdes(in): where the object's disk format is deposited
11372  * new_recdes(in):
11373  *
11374  * Note: Transform the object represented by attr_info to disk format
11375  */
11376 SCAN_CODE
11378  RECDES * new_recdes)
11379 {
  /* Thin wrapper: transform attr_info to disk format, creating/copying
   * external LOB storage as needed (LOB_FLAG_INCLUDE_LOB). */
11380  return heap_attrinfo_transform_to_disk_internal (thread_p, attr_info, old_recdes, new_recdes, LOB_FLAG_INCLUDE_LOB);
11381 }
11382 
11383 /*
11384  * heap_attrinfo_transform_to_disk_except_lob () -
11385  * Transform to disk an attribute information
11386  * kind of instance. Do not create lob.
11387  * return: SCAN_CODE
11388  * (Either of S_SUCCESS, S_DOESNT_FIT,
11389  * S_ERROR)
11390  * attr_info(in/out): The attribute information structure
11391  * old_recdes(in): where the object's disk format is deposited
11392  * new_recdes(in):
11393  *
11394  * Note: Transform the object represented by attr_info to disk format
11395  */
11396 SCAN_CODE
11398  RECDES * old_recdes, RECDES * new_recdes)
11399 {
  /* Thin wrapper: same transformation but skips LOB creation/copying
   * (LOB_FLAG_EXCLUDE_LOB). */
11400  return heap_attrinfo_transform_to_disk_internal (thread_p, attr_info, old_recdes, new_recdes, LOB_FLAG_EXCLUDE_LOB);
11401 }
11402 
11403 /*
11404  * heap_attrinfo_transform_to_disk_internal () -
11405  * Transform to disk an attribute information
11406  * kind of instance.
11407  * return: SCAN_CODE
11408  * (Either of S_SUCCESS, S_DOESNT_FIT,
11409  * S_ERROR)
11410  * attr_info(in/out): The attribute information structure
11411  * old_recdes(in): where the object's disk format is deposited
11412  * new_recdes(in):
11413  * lob_create_flag(in):
11414  *
11415  * Note: Transform the object represented by attr_info to disk format
11416  */
11417 static SCAN_CODE
11418 heap_attrinfo_transform_to_disk_internal (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * attr_info, RECDES * old_recdes,
11419  RECDES * new_recdes, int lob_create_flag)
11420 {
11421  OR_BUF orep, *buf;
11422  char *ptr_bound, *ptr_varvals;
11423  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
11424  DB_VALUE temp_dbvalue;
11425  PR_TYPE *pr_type; /* Primitive type array function structure */
11426  unsigned int repid_bits;
11427  SCAN_CODE status;
11428  int i;
11429  DB_VALUE *dbvalue = NULL;
11430  int expected_size, tmp;
  /* volatile: must survive the longjmp taken by or_put_* on buffer overflow. */
11431  volatile int offset_size;
11432  int mvcc_wasted_space = 0, header_size;
11433  bool is_mvcc_class;
11434 
11435  /* check to make sure the attr_info has been used, it should not be empty. */
11436  if (attr_info->num_values == -1)
11437  {
11438  return S_ERROR;
11439  }
11440 
11441  /*
11442  * Get any of the values that have not been set/read
11443  */
11444  if (heap_attrinfo_set_uninitialized (thread_p, &attr_info->inst_oid, old_recdes, attr_info) != NO_ERROR)
11445  {
11446  return S_ERROR;
11447  }
11448 
11449  /* Start transforming the dbvalues into disk values for the object */
11450  OR_BUF_INIT2 (orep, new_recdes->data, new_recdes->area_size);
11451  buf = &orep;
11452 
11453  is_mvcc_class = !mvcc_is_mvcc_disabled_class (&(attr_info->class_oid));
11454  expected_size = heap_attrinfo_get_disksize (attr_info, is_mvcc_class, &tmp);
11455  offset_size = tmp;
11456 
11457  if (is_mvcc_class)
11458  {
  /* Reserve slack so the MVCC header can later grow to its maximum size. */
11459  mvcc_wasted_space = (OR_MVCC_MAX_HEADER_SIZE - OR_MVCC_INSERT_HEADER_SIZE);
11460  if (old_recdes != NULL)
11461  {
11462  /* Update case, reserve space for previous version LSA. */
11463  expected_size += OR_MVCC_PREV_VERSION_LSA_SIZE;
11464  mvcc_wasted_space -= OR_MVCC_PREV_VERSION_LSA_SIZE;
11465  }
11466  }
11467 
11468  /* reserve enough space if need to add additional MVCC header info */
11469  expected_size += mvcc_wasted_space;
11470 
  /* or_put_* routines longjmp back here with ER_TF_BUFFER_OVERFLOW when the
   * record area is too small; any other non-zero value is a real error. */
11471  switch (_setjmp (buf->env))
11472  {
11473  case 0:
11474  status = S_SUCCESS;
11475 
11476  /*
11477  * Store the representation of the class along with bound bit
11478  * flag information
11479  */
11480 
11481  repid_bits = attr_info->last_classrepr->id;
11482  /*
11483  * Do we have fixed value attributes ?
11484  */
11485  if ((attr_info->last_classrepr->n_attributes - attr_info->last_classrepr->n_variable) != 0)
11486  {
11487  repid_bits |= OR_BOUND_BIT_FLAG;
11488  }
11489 
11490  /* offset size */
11491  OR_SET_VAR_OFFSET_SIZE (repid_bits, offset_size);
11492 
11493  /*
11494  * We must increase the current value by one so that clients
11495  * can detect the change in object. That is, clients will need to
11496  * refetch the object.
11497  */
11498  attr_info->inst_chn++;
11499  if (is_mvcc_class)
11500  {
11501  if (old_recdes == NULL)
11502  {
  /* Insert: write repid bits, CHN and MVCC insert id only. */
11504  or_put_int (buf, repid_bits);
11505  or_put_int (buf, 0); /* CHN */
11506  or_put_bigint (buf, 0); /* MVCC insert id */
11507  header_size = OR_MVCC_INSERT_HEADER_SIZE;
11508  }
11509  else
11510  {
  /* Update: also append a (null) previous-version LSA slot. */
11511  LOG_LSA null_lsa = LSA_INITIALIZER;
11513  or_put_int (buf, repid_bits);
11514  or_put_int (buf, 0); /* CHN */
11515  or_put_bigint (buf, 0); /* MVCC insert id */
11516 
11517  assert ((buf->ptr + OR_MVCC_PREV_VERSION_LSA_SIZE) <= buf->endptr);
11518  or_put_data (buf, (char *) &null_lsa, OR_MVCC_PREV_VERSION_LSA_SIZE); /* prev version lsa */
11520  }
11521  }
11522  else
11523  {
11524  or_put_int (buf, repid_bits);
11525  or_put_int (buf, attr_info->inst_chn);
11526  header_size = OR_NON_MVCC_HEADER_SIZE;
11527  }
11528 
11529  /*
11530  * Calculate the pointer address to variable offset attribute table,
11531  * fixed attributes, and variable attributes
11532  */
11533 
11534  ptr_bound = OR_GET_BOUND_BITS (buf->buffer, attr_info->last_classrepr->n_variable,
11535  attr_info->last_classrepr->fixed_length);
11536 
11537  /*
11538  * Variable offset table is relative to the beginning of the buffer
11539  */
11540 
11541  ptr_varvals = (ptr_bound
11543  - attr_info->last_classrepr->n_variable));
11544 
11545  /* Need to make sure that the bound array is not past the allocated buffer because OR_ENABLE_BOUND_BIT() will
11546  * just slam the bound bit without checking the length. */
11547 
11548  if (ptr_varvals + mvcc_wasted_space >= buf->endptr)
11549  {
11550  new_recdes->length = -expected_size; /* set to negative */
11551  return S_DOESNT_FIT;
11552  }
11553 
11554  for (i = 0; i < attr_info->num_values; i++)
11555  {
11556  value = &attr_info->values[i];
11557  dbvalue = &value->dbvalue;
11558  pr_type = value->last_attrepr->domain->type;
11559  if (pr_type == NULL)
11560  {
11561  return S_ERROR;
11562  }
11563 
11564  /*
11565  * Is this a fixed or variable attribute ?
11566  */
11567  if (value->last_attrepr->is_fixed != 0)
11568  {
11569  /*
11570  * Fixed attribute
11571  * Write the fixed attributes values, if unbound, does not matter
11572  * what value is stored. We need to set the appropriate bit in the
11573  * bound bit array for fixed attributes. For variable attributes,
11574  */
11575  buf->ptr = (buf->buffer
11577  + value->last_attrepr->location);
11578 
11579  if (value->do_increment)
11580  {
11581  if (qdata_increment_dbval (dbvalue, dbvalue, value->do_increment) != NO_ERROR)
11582  {
11583  status = S_ERROR;
11584  break;
11585  }
11586  }
11587 
11588  if (dbvalue == NULL || db_value_is_null (dbvalue) == true)
11589  {
11590  /*
11591  * This is an unbound value.
11592  * 1) Set any value in the fixed array value table, so we can
11593  * advance to next attribute.
11594  * 2) and set the bound bit as unbound
11595  */
11596  db_value_domain_init (&temp_dbvalue, value->last_attrepr->type,
11597  value->last_attrepr->domain->precision, value->last_attrepr->domain->scale);
11598  dbvalue = &temp_dbvalue;
11599  OR_CLEAR_BOUND_BIT (ptr_bound, value->last_attrepr->position);
11600 
11601  /*
11602  * pad the appropriate amount, writeval needs to be modified
11603  * to accept a domain so it can perform this padding.
11604  */
11605  or_pad (buf, tp_domain_disk_size (value->last_attrepr->domain));
11606 
11607  }
11608  else
11609  {
11610  /*
11611  * Write the value.
11612  */
11613  OR_ENABLE_BOUND_BIT (ptr_bound, value->last_attrepr->position);
11614  (*(pr_type->data_writeval)) (buf, dbvalue);
11615  }
11616  }
11617  else
11618  {
11619  /*
11620  * Variable attribute
11621  * 1) Set the offset to this value in the variable offset table
11622  * 2) Set the value in the variable value portion of the disk
11623  * object (Only if the value is bound)
11624  */
11625 
11626  /*
11627  * Write the offset onto the variable offset table and remember
11628  * the current pointer to the variable offset table
11629  */
11630 
11631  if (value->do_increment != 0)
11632  {
  /* Increments are only supported on fixed attributes. */
11633  status = S_ERROR;
11634  break;
11635  }
11636 
11637  buf->ptr = (char *) (OR_VAR_ELEMENT_PTR (buf->buffer, value->last_attrepr->location));
11638  /* compute the variable offsets relative to the end of the header (beginning of variable table) */
11639  or_put_offset_internal (buf, CAST_BUFLEN (ptr_varvals - buf->buffer - header_size), offset_size);
11640 
11641  if (dbvalue != NULL && db_value_is_null (dbvalue) != true)
11642  {
11643  /*
11644  * Now write the value and remember the current pointer
11645  * to variable value array for the next element.
11646  */
11647  buf->ptr = ptr_varvals;
11648 
11649  if (lob_create_flag == LOB_FLAG_INCLUDE_LOB && value->state == HEAP_WRITTEN_ATTRVALUE
11650  && (pr_type->id == DB_TYPE_BLOB || pr_type->id == DB_TYPE_CLOB))
11651  {
  /* New LOB value: copy the external LOB under this class's name
   * as metadata, then store the copy's descriptor in the value. */
11652  DB_ELO dest_elo, *elo_p;
11653  char *save_meta_data, *new_meta_data;
11654  int error;
11655 
11656  assert (db_value_type (dbvalue) == DB_TYPE_BLOB || db_value_type (dbvalue) == DB_TYPE_CLOB);
11657 
11658  elo_p = db_get_elo (dbvalue);
11659 
11660  if (elo_p == NULL)
11661  {
11662  continue;
11663  }
11664 
11665  if (heap_get_class_name (thread_p, &(attr_info->class_oid), &new_meta_data) != NO_ERROR
11666  || new_meta_data == NULL)
11667  {
11668  status = S_ERROR;
11669  break;
11670  }
11671  save_meta_data = elo_p->meta_data;
11672  elo_p->meta_data = new_meta_data;
11673  error = db_elo_copy (db_get_elo (dbvalue), &dest_elo);
11674 
11675  free_and_init (elo_p->meta_data);
11676  elo_p->meta_data = save_meta_data;
11677 
11678  /* The purpose of HEAP_WRITTEN_LOB_ATTRVALUE is to avoid reenter this branch. In the first pass,
11679  * this branch is entered and elo is copied. When BUFFER_OVERFLOW happens, we need avoid to copy
11680  * elo again. Otherwise it will generate 2 copies. */
11682 
11683  error = (error >= 0 ? NO_ERROR : error);
11684  if (error == NO_ERROR)
11685  {
11686  pr_clear_value (dbvalue);
11687  db_make_elo (dbvalue, pr_type->id, &dest_elo);
11688  dbvalue->need_clear = true;
11689  }
11690  else
11691  {
11692  status = S_ERROR;
11693  break;
11694  }
11695  }
11696 
11697  (*(pr_type->data_writeval)) (buf, dbvalue);
11698  ptr_varvals = buf->ptr;
11699  }
11700  }
11701  }
11702 
11703  if (attr_info->last_classrepr->n_variable > 0)
11704  {
11705  /*
11706  * The last element of the variable offset table points to the end of
11707  * the object. The variable offset array starts with zero, so we can
11708  * just access n_variable...
11709  */
11710 
11711  /* Write the offset to the end of the variable attributes table */
11712  buf->ptr = ((char *) (OR_VAR_ELEMENT_PTR (buf->buffer, attr_info->last_classrepr->n_variable)));
11713  or_put_offset_internal (buf, CAST_BUFLEN (ptr_varvals - buf->buffer - header_size), offset_size);
11714  buf->ptr = PTR_ALIGN (buf->ptr, INT_ALIGNMENT);
11715  }
11716 
11717  /* Record the length of the object */
11718  new_recdes->length = CAST_BUFLEN (ptr_varvals - buf->buffer);
11719 
11720  /* if not enough MVCC wasted space need to reallocate */
11721  if (ptr_varvals + mvcc_wasted_space < buf->endptr)
11722  {
11723  break;
11724  }
11725 
  /* fall through to the overflow handling below when the MVCC slack did
   * not fit: the record must be retried with a larger area. */
11726  /*
11727  * if the longjmp status was anything other than ER_TF_BUFFER_OVERFLOW,
11728  * it represents an error condition and er_set will have been called
11729  */
11730  case ER_TF_BUFFER_OVERFLOW:
11731 
11732  status = S_DOESNT_FIT;
11733 
11734  /*
11735  * Give a hint of the needed space. The hint is given as a negative
11736  * value in the record descriptor length. Make sure that this length
11737  * is larger than the current record descriptor area.
11738  */
11739 
11740  new_recdes->length = -expected_size; /* set to negative */
11741 
11742  if (new_recdes->area_size > -new_recdes->length)
11743  {
11744  /*
11745  * This may be an error. The estimated disk size is smaller
11746  * than the current record descriptor area size. For now assume
11747  * at least 20% above the current area descriptor. The main problem
11748  * is that heap_attrinfo_get_disksize () guess its size as much as
11749  * possible
11750  */
11751  new_recdes->length = -(int) (new_recdes->area_size * 1.20);
11752  }
11753  break;
11754 
11755  default:
11756  status = S_ERROR;
11757  break;
11758  }
11759 
11760  return status;
11761 }
11762 
11763 /*
11764  * heap_attrinfo_start_refoids () - Initialize an attribute information structure
11765  * with attributes that may reference other objects
11766  * return: NO_ERROR
11767  * class_oid(in): The class identifier of the instances where values
11768  * attributes values are going to be read.
11769  * attr_info(in/out): The attribute information structure
11770  *
11771  * Note: Initialize an attribute information structure with attributes
11772  * that may reference other objects (OIDs).
11773  *
11774  * Note: The caller must call heap_attrinfo_end after he is done with
11775  * attribute information.
11776  */
11777 
11778 static int
11779 heap_attrinfo_start_refoids (THREAD_ENTRY * thread_p, OID * class_oid, HEAP_CACHE_ATTRINFO * attr_info)
11780 {
11781  ATTR_ID guess_attrids[HEAP_GUESS_NUM_ATTRS_REFOIDS];
11782  ATTR_ID *set_attrids;
11783  int num_found_attrs;
11784  OR_CLASSREP *classrepr;
11785  int classrepr_cacheindex = -1;
11786  OR_ATTRIBUTE *search_attrepr;
11787  int i;
11788  int ret = NO_ERROR;
11789 
  /* Mark attr_info as empty until it is successfully initialized. */
11790  attr_info->num_values = -1;
11791 
11792  /*
11793  * Find the current representation of the class, then scan all its
11794  * attributes finding the ones that may reference objects
11795  */
11796 
11797  classrepr = heap_classrepr_get (thread_p, class_oid, NULL, NULL_REPRID, &classrepr_cacheindex);
11798  if (classrepr == NULL)
11799  {
11800  return ER_FAILED;
11801  }
11802 
11803  /*
11804  * Go over the list of attributes until the desired attributes (OIDs, sets)
11805  * are found
11806  */
11807 
  /* Use the stack array when it is big enough; otherwise heap-allocate. */
11808  if (classrepr->n_attributes > HEAP_GUESS_NUM_ATTRS_REFOIDS)
11809  {
11810  set_attrids = (ATTR_ID *) malloc (classrepr->n_attributes * sizeof (ATTR_ID));
11811  if (set_attrids == NULL)
11812  {
11814  classrepr->n_attributes * sizeof (ATTR_ID));
11815  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
11816  return ER_OUT_OF_VIRTUAL_MEMORY;
11817  }
11818  }
11819  else
11820  {
11821  set_attrids = guess_attrids;
11822  }
11823 
  /* Collect the ids of attributes whose domain can reference other objects. */
11824  for (i = 0, num_found_attrs = 0; i < classrepr->n_attributes; i++)
11825  {
11826  search_attrepr = &classrepr->attributes[i];
11827  if (tp_domain_references_objects (search_attrepr->domain) == true)
11828  {
11829  set_attrids[num_found_attrs++] = search_attrepr->id;
11830  }
11831  }
11832 
11833  ret = heap_attrinfo_start (thread_p, class_oid, num_found_attrs, set_attrids, attr_info);
11834 
11835  if (set_attrids != guess_attrids)
11836  {
11837  free_and_init (set_attrids);
11838  }
11839 
11840  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
11841 
11842  return ret;
11843 }
11844 
11845 /*
11846  * heap_attrinfo_start_with_index () -
11847  * return:
11848  * class_oid(in):
11849  * class_recdes(in):
11850  * attr_info(in):
11851  * idx_info(in):
11852  */
11853 int
11854 heap_attrinfo_start_with_index (THREAD_ENTRY * thread_p, OID * class_oid, RECDES * class_recdes,
11855  HEAP_CACHE_ATTRINFO * attr_info, HEAP_IDX_ELEMENTS_INFO * idx_info)
11856 {
11857  ATTR_ID guess_attrids[HEAP_GUESS_NUM_INDEXED_ATTRS];
11858  ATTR_ID *set_attrids;
11859  int num_found_attrs;
11860  OR_CLASSREP *classrepr = NULL;
11861  int classrepr_cacheindex = -1;
11862  OR_ATTRIBUTE *search_attrepr;
11863  int i, j;
11864  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
11865  int *num_btids;
11866  OR_INDEX *indexp;
11867 
11868  idx_info->has_single_col = 0;
11869  idx_info->has_multi_col = 0;
11870  idx_info->num_btids = 0;
11871 
11872  num_btids = &idx_info->num_btids;
11873 
11874  set_attrids = guess_attrids;
11875  attr_info->num_values = -1; /* initialize attr_info */
11876 
11877  classrepr = heap_classrepr_get (thread_p, class_oid, class_recdes, NULL_REPRID, &classrepr_cacheindex);
11878  if (classrepr == NULL)
11879  {
11880  return ER_FAILED;
11881  }
11882 
  /* Use the stack array when it is big enough; otherwise heap-allocate. */
11883  if (classrepr->n_attributes > HEAP_GUESS_NUM_INDEXED_ATTRS)
11884  {
11885  set_attrids = (ATTR_ID *) malloc (classrepr->n_attributes * sizeof (ATTR_ID));
11886  if (set_attrids == NULL)
11887  {
11889  classrepr->n_attributes * sizeof (ATTR_ID));
11890  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
11891  return ER_OUT_OF_VIRTUAL_MEMORY;
11892  }
11893  }
11894  else
11895  {
11896  set_attrids = guess_attrids;
11897  }
11898 
11899  /*
11900  * Read the number of BTID's in this class
11901  */
11902  *num_btids = classrepr->n_indexes;
11903 
  /* Classify the indexes: single-column vs. multi-column. */
11904  for (j = 0; j < *num_btids; j++)
11905  {
11906  indexp = &classrepr->indexes[j];
11907  if (indexp->n_atts == 1)
11908  {
11909  idx_info->has_single_col = 1;
11910  }
11911  else if (indexp->n_atts > 1)
11912  {
11913  idx_info->has_multi_col = 1;
11914  }
11915  /* check for already found both */
11916  if (idx_info->has_single_col && idx_info->has_multi_col)
11917  {
11918  break;
11919  }
11920  }
11921 
11922  /*
11923  * Go over the list of attrs until all indexed attributes (OIDs, sets)
11924  * are found
11925  */
11926  for (i = 0, num_found_attrs = 0, search_attrepr = classrepr->attributes; i < classrepr->n_attributes;
11927  i++, search_attrepr++)
11928  {
11929  if (search_attrepr->n_btids <= 0)
11930  {
11931  continue;
11932  }
11933 
  /* Only collect attributes that are the key of some single-column index. */
11934  if (idx_info->has_single_col)
11935  {
11936  for (j = 0; j < *num_btids; j++)
11937  {
11938  indexp = &classrepr->indexes[j];
11939  if (indexp->n_atts == 1 && indexp->atts[0]->id == search_attrepr->id)
11940  {
11941  set_attrids[num_found_attrs++] = search_attrepr->id;
11942  break;
11943  }
11944  }
11945  }
11946  } /* for (i = 0 ...) */
11947 
11948  if (idx_info->has_multi_col == 0 && num_found_attrs == 0)
11949  {
11950  /* initialize the attrinfo cache and return, there is nothing else to do */
11951  /* (void) memset(attr_info, '\0', sizeof (HEAP_CACHE_ATTRINFO)); */
11952 
11953  /* now set the num_values to -1 which indicates that this is an empty HEAP_CACHE_ATTRINFO and shouldn't be
11954  * operated on. */
11955  attr_info->num_values = -1;
11956 
11957  /* free the class representation */
11958  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
11959  }
11960  else
11961  { /* num_found_attrs > 0 */
11962  /* initialize attribute information */
11963  attr_info->class_oid = *class_oid;
11964  attr_info->last_cacheindex = classrepr_cacheindex;
11965  attr_info->read_cacheindex = -1;
11966  attr_info->last_classrepr = classrepr;
11967  attr_info->read_classrepr = NULL;
11968  OID_SET_NULL (&attr_info->inst_oid);
11969  attr_info->inst_chn = NULL_CHN;
11970  attr_info->num_values = num_found_attrs;
11971 
11972  if (num_found_attrs <= 0)
11973  {
11974  attr_info->values = NULL;
11975  }
11976  else
11977  {
11978  attr_info->values =
11979  (HEAP_ATTRVALUE *) db_private_alloc (thread_p, (num_found_attrs * sizeof (HEAP_ATTRVALUE)));
11980  if (attr_info->values == NULL)
11981  {
11982  /* free the class representation */
11983  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
11984  attr_info->num_values = -1;
11985  goto error;
11986  }
11987  }
11988 
11989  /*
11990  * Set the attribute identifier of the desired attributes in the value
11991  * attribute information, and indicates that the current value is
11992  * uninitialized. That is, it has not been read, set or whatever.
11993  */
11994  for (i = 0; i < attr_info->num_values; i++)
11995  {
11996  value = &attr_info->values[i];
11997  value->attrid = set_attrids[i];
11998  value->state = HEAP_UNINIT_ATTRVALUE;
11999  value->last_attrepr = NULL;
12000  value->read_attrepr = NULL;
12001  }
12002 
12003  /*
12004  * Make last information to be recached for each individual attribute
12005  * value. Needed for WRITE and Default values
12006  */
12007  if (heap_attrinfo_recache_attrepr (attr_info, true) != NO_ERROR)
12008  {
12009  /* classrepr will be freed in heap_attrinfo_end */
12010  heap_attrinfo_end (thread_p, attr_info);
12011  goto error;
12012  }
12013  }
12014 
12015  if (set_attrids != guess_attrids)
12016  {
12017  free_and_init (set_attrids);
12018  }
12019 
  /* Return at least 1 when only multi-column indexes exist, so callers know
   * there is index work to do even though no single-column attr was cached. */
12020  if (num_found_attrs == 0 && idx_info->has_multi_col)
12021  {
12022  return 1;
12023  }
12024  else
12025  {
12026  return num_found_attrs;
12027  }
12028 
12029  /* **** */
12030 error:
12031 
12032  if (set_attrids != guess_attrids)
12033  {
12034  free_and_init (set_attrids);
12035  }
12036 
12037  return ER_FAILED;
12038 }
12039 
12040 /*
12041  * heap_classrepr_find_index_id () - Find the indicated index ID from the class repr
12042  * return: ID of desired index or -1 if an error occurred.
12043  * classrepr(in): The class representation.
12044  * btid(in): The BTID of the interested index.
12045  *
12046  * Note: Locate the desired index by matching it with the passed BTID.
12047  * Return the ID of the index if found.
12048  */
12049 int
12051 {
12052  int i;
12053  int id = -1;
12054 
  /* Linear scan over the class's indexes for a matching BTID;
   * -1 is returned when no index matches. */
12055  for (i = 0; i < classrepr->n_indexes; i++)
12056  {
12057  if (BTID_IS_EQUAL (&(classrepr->indexes[i].btid), btid))
12058  {
12059  id = i;
12060  break;
12061  }
12062  }
12063 
12064  return id;
12065 }
12066 
12067 /*
12068  * heap_attrinfo_start_with_btid () - Initialize an attribute information structure
12069  * return: ID for the index which corresponds to the passed BTID.
12070  * If an error occurred, a -1 is returned.
12071  * class_oid(in): The class identifier of the instances where values
12072  * attributes values are going to be read.
12073  * btid(in): The BTID of the interested index.
12074  * attr_info(in/out): The attribute information structure
12075  *
12076  * Note: Initialize an attribute information structure, so that values
12077  * of instances can be retrieved based on the desired attributes.
12078  *
12079  * There are currently three functions which can be used to
12080  * initialize the attribute information structure; heap_attrinfo_start(),
12081  * heap_attrinfo_start_with_index() and this one. This function determines
12082  * which attributes belong to the passed BTID and populate the
12083  * information structure on those attributes.
12084  *
12085  * The attrinfo structure is a structure where values of
12086  * instances can be read. For example an object is retrieved,
12087  * then some of its attributes are converted to dbvalues and
12088  * placed in this structure.
12089  *
12090  * Note: The caller must call heap_attrinfo_end after he is done with
12091  * attribute information.
12092  */
12093 int
12095 {
12096  ATTR_ID guess_attrids[HEAP_GUESS_NUM_INDEXED_ATTRS];
12097  ATTR_ID *set_attrids;
12098  OR_CLASSREP *classrepr = NULL;
12099  int i;
12100  int index_id = -1;
12101  int classrepr_cacheindex = -1;
12102  int num_found_attrs = 0;
12103 
12104  /*
12105  * We'll start by assuming that the number of attributes will fit into
12106  * the preallocated array.
12107  */
12108  set_attrids = guess_attrids;
12109 
12110  attr_info->num_values = -1; /* initialize attr_info */
12111 
12112  /*
12113  * Get the class representation so that we can access the indexes.
12114  */
12115  classrepr = heap_classrepr_get (thread_p, class_oid, NULL, NULL_REPRID, &classrepr_cacheindex);
12116  if (classrepr == NULL)
12117  {
12118  goto error;
12119  }
12120 
12121  /*
12122  * Get the index ID which corresponds to the BTID
12123  */
12124  index_id = heap_classrepr_find_index_id (classrepr, btid);
12125  if (index_id == -1)
12126  {
12127  goto error;
12128  }
12129 
12130  /*
12131  * Get the number of attributes associated with this index.
12132  * Allocate a new attribute ID array if we have more attributes
12133  * than will fit in the pre-allocated array.
12134  * Fill the array with the attribute ID's
12135  * Free the class representation.
12136  */
12137  num_found_attrs = classrepr->indexes[index_id].n_atts;
12138  if (num_found_attrs > HEAP_GUESS_NUM_INDEXED_ATTRS)
12139  {
12140  set_attrids = (ATTR_ID *) malloc (num_found_attrs * sizeof (ATTR_ID));
12141  if (set_attrids == NULL)
12142  {
12143  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, num_found_attrs * sizeof (ATTR_ID));
12144  goto error;
12145  }
12146  }
12147 
  /* Collect the attribute ids that make up the index key. */
12148  for (i = 0; i < num_found_attrs; i++)
12149  {
12150  set_attrids[i] = classrepr->indexes[index_id].atts[i]->id;
12151  }
12152 
12153  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
12154 
12155  /*
12156  * Get the attribute information for the collected ID's
12157  */
12158  if (num_found_attrs > 0)
12159  {
12160  if (heap_attrinfo_start (thread_p, class_oid, num_found_attrs, set_attrids, attr_info) != NO_ERROR)
12161  {
12162  goto error;
12163  }
12164  }
12165 
12166  /*
12167  * Free the attribute ID array if it was dynamically allocated
12168  */
12169  if (set_attrids != guess_attrids)
12170  {
12171  free_and_init (set_attrids);
12172  }
12173 
12174  return index_id;
12175 
12176  /* **** */
12177 error:
12178 
  /* Release the class representation and any heap-allocated id array. */
12179  if (classrepr)
12180  {
12181  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
12182  }
12183 
12184  if (set_attrids != guess_attrids)
12185  {
12186  free_and_init (set_attrids);
12187  }
12188 
12189  return ER_FAILED;
12190 }
12191 
12192 #if defined (ENABLE_UNUSED_FUNCTION)
12193 /*
12194  * heap_attrvalue_get_index () -
12195  * return:
12196  * value_index(in):
12197  * attrid(in):
12198  * n_btids(in):
12199  * btids(in):
12200  * idx_attrinfo(in):
12201  */
12202 DB_VALUE *
12203 heap_attrvalue_get_index (int value_index, ATTR_ID * attrid, int *n_btids, BTID ** btids,
12204  HEAP_CACHE_ATTRINFO * idx_attrinfo)
12205 {
12206  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
12207 
12208  /* check to make sure the idx_attrinfo has been used, it should never be empty. */
12209  if (idx_attrinfo->num_values == -1)
12210  {
12211  return NULL;
12212  }
12213 
12214  if (value_index > idx_attrinfo->num_values || value_index < 0)
12215  {
12216  *n_btids = 0;
12217  *btids = NULL;
12218  *attrid = NULL_ATTRID;
12219  return NULL;
12220  }
12221  else
12222  {
12223  value = &idx_attrinfo->values[value_index];
12224  *n_btids = value->last_attrepr->n_btids;
12225  *btids = value->last_attrepr->btids;
12226  *attrid = value->attrid;
12227  return &value->dbvalue;
12228  }
12229 
12230 }
12231 #endif
12232 
12233 /*
12234  * heap_midxkey_key_get () -
12235  * return:
12236  * recdes(in):
12237  * midxkey(in/out):
12238  * index(in):
12239  * attrinfo(in):
12240  * func_domain(in):
12241  * key_domain(out):
12242  */
static DB_MIDXKEY *
heap_midxkey_key_get (RECDES * recdes, DB_MIDXKEY * midxkey, OR_INDEX * index, HEAP_CACHE_ATTRINFO * attrinfo,
		      DB_VALUE * func_res, TP_DOMAIN * func_domain, TP_DOMAIN ** key_domain)
{
  char *nullmap_ptr;
  OR_ATTRIBUTE **atts;
  int num_atts, i, k;
  DB_VALUE value;
  OR_BUF buf;
  int error = NO_ERROR;
  TP_DOMAIN *set_domain = NULL;	/* head of the per-column domain list built when key_domain is requested */
  TP_DOMAIN *next_domain = NULL;	/* tail of that list, used for O(1) appends */

  assert (index != NULL);

  num_atts = index->n_atts;
  atts = index->atts;
  if (func_res)
    {
      /* Function index: the key consists of the columns up to attr_index_start plus the function result. */
      num_atts = index->func_index_info->attr_index_start + 1;
    }
  assert (PTR_ALIGN (midxkey->buf, INT_ALIGNMENT) == midxkey->buf);

  or_init (&buf, midxkey->buf, -1);

  /* The key buffer starts with a bound-bit map: one bit per column, set when that column's value is non-NULL. */
  nullmap_ptr = midxkey->buf;
  or_advance (&buf, pr_midxkey_init_boundbits (nullmap_ptr, num_atts));
  k = 0;
  /* i walks the index attributes, k counts the key columns actually written. */
  for (i = 0; i < num_atts && k < num_atts; i++)
    {
      if (index->func_index_info && (i == index->func_index_info->col_id))
	{
	  /* This key position holds the function expression result rather than a stored attribute. */
	  assert (func_domain != NULL);

	  if (!db_value_is_null (func_res))
	    {
	      (*(func_domain->type->index_writeval)) (&buf, func_res);
	      OR_ENABLE_BOUND_BIT (nullmap_ptr, k);
	    }

	  if (key_domain != NULL)
	    {
	      /* Append a private copy of the function domain to the domain list. */
	      if (k == 0)
		{
		  assert (set_domain == NULL);
		  set_domain = tp_domain_copy (func_domain, 0);
		  if (set_domain == NULL)
		    {
		      assert (false);
		      goto error;
		    }
		  next_domain = set_domain;
		}
	      else
		{
		  next_domain->next = tp_domain_copy (func_domain, 0);
		  if (next_domain->next == NULL)
		    {
		      assert (false);
		      goto error;
		    }
		  next_domain = next_domain->next;
		}
	    }

	  k++;
	}
      if (k == num_atts)
	{
	  break;
	}
      /* Read the next stored attribute value from the record; a NULL value leaves its bound bit cleared. */
      error = heap_midxkey_get_value (recdes, atts[i], &value, attrinfo);
      if (error == NO_ERROR && !db_value_is_null (&value))
	{
	  (*(atts[i]->domain->type->index_writeval)) (&buf, &value);
	  OR_ENABLE_BOUND_BIT (nullmap_ptr, k);
	}

      if (DB_NEED_CLEAR (&value))
	{
	  pr_clear_value (&value);
	}
      if (key_domain != NULL)
	{
	  /* Append a copy of the attribute domain, honoring the DESC direction of this index column. */
	  if (k == 0)
	    {
	      assert (set_domain == NULL);
	      set_domain = tp_domain_copy (atts[i]->domain, 0);
	      if (set_domain == NULL)
		{
		  assert (false);
		  goto error;
		}
	      if (index->asc_desc[i] != 0)
		{
		  set_domain->is_desc = 1;
		}
	      next_domain = set_domain;
	    }
	  else
	    {
	      next_domain->next = tp_domain_copy (atts[i]->domain, 0);
	      if (next_domain->next == NULL)
		{
		  assert (false);
		  goto error;
		}
	      if (index->asc_desc[i] != 0)
		{
		  next_domain->next->is_desc = 1;
		}
	      next_domain = next_domain->next;
	    }
	}
      k++;
    }

  /* Finalize the midxkey header; the domain field stays NULL (the caller gets it via key_domain if requested). */
  midxkey->size = CAST_BUFLEN (buf.ptr - buf.buffer);
  midxkey->ncolumns = num_atts;
  midxkey->domain = NULL;

  if (key_domain != NULL)
    {
      /* Wrap the collected per-column domains into a cached MIDXKEY domain. */
      *key_domain = tp_domain_construct (DB_TYPE_MIDXKEY, (DB_OBJECT *) 0, num_atts, 0, set_domain);

      if (*key_domain)
	{
	  *key_domain = tp_domain_cache (*key_domain);
	}
      else
	{
	  assert (false);
	  goto error;
	}
    }

  return midxkey;

error:

  /* Free the partially built domain list; it has not been handed to the domain cache yet. */
  if (set_domain)
    {
      TP_DOMAIN *td, *next;

      for (td = set_domain, next = NULL; td != NULL; td = next)
	{
	  next = td->next;
	  tp_domain_free (td);
	}
    }

  return NULL;
}
12396 
12397 /*
12398  * heap_midxkey_key_generate () -
12399  * return:
12400  * recdes(in):
12401  * midxkey(in):
12402  * att_ids(in):
12403  * attrinfo(in):
12404  */
12405 static DB_MIDXKEY *
12406 heap_midxkey_key_generate (THREAD_ENTRY * thread_p, RECDES * recdes, DB_MIDXKEY * midxkey, int *att_ids,
12407  HEAP_CACHE_ATTRINFO * attrinfo, DB_VALUE * func_res, int func_col_id,
12408  int func_attr_index_start)
12409 {
12410  char *nullmap_ptr;
12411  int num_vals, i, reprid, k;
12412  OR_ATTRIBUTE *att;
12413  DB_VALUE value;
12414  OR_BUF buf;
12415  int error = NO_ERROR;
12416 
12417  /*
12418  * Make sure that we have the needed cached representation.
12419  */
12420 
12421  if (recdes != NULL)
12422  {
12423  reprid = or_rep_id (recdes);
12424 
12425  if (attrinfo->read_classrepr == NULL || attrinfo->read_classrepr->id != reprid)
12426  {
12427  /* Get the needed representation */
12428  if (heap_attrinfo_recache (thread_p, reprid, attrinfo) != NO_ERROR)
12429  {
12430  return NULL;
12431  }
12432  }
12433  }
12434 
12435  assert (PTR_ALIGN (midxkey->buf, INT_ALIGNMENT) == midxkey->buf);
12436 
12437  or_init (&buf, midxkey->buf, -1);
12438 
12439  nullmap_ptr = midxkey->buf;
12440 
12441  /* On constructing index */
12442  num_vals = attrinfo->num_values;
12443  if (func_res)
12444  {
12445  num_vals = func_attr_index_start + 1;
12446  }
12447  or_advance (&buf, pr_midxkey_init_boundbits (nullmap_ptr, num_vals));
12448  k = 0;
12449  for (i = 0; i < num_vals && k < num_vals; i++)
12450  {
12451  if (i == func_col_id)
12452  {
12453  if (!db_value_is_null (func_res))
12454  {
12456  (*(domain->type->index_writeval)) (&buf, func_res);
12457  OR_ENABLE_BOUND_BIT (nullmap_ptr, k);
12458  }
12459  k++;
12460  }
12461  if (k == num_vals)
12462  {
12463  break;
12464  }
12465  att = heap_locate_attribute (att_ids[i], attrinfo);
12466 
12467  error = heap_midxkey_get_value (recdes, att, &value, attrinfo);
12468  if (error == NO_ERROR && !db_value_is_null (&value))
12469  {
12470  (*(att->domain->type->index_writeval)) (&buf, &value);
12471  OR_ENABLE_BOUND_BIT (nullmap_ptr, k);
12472  }
12473 
12474  if (DB_NEED_CLEAR (&value))
12475  {
12476  pr_clear_value (&value);
12477  }
12478 
12479  k++;
12480  }
12481 
12482  if (value.need_clear == true)
12483  {
12484  pr_clear_value (&value);
12485  }
12486  midxkey->size = CAST_BUFLEN (buf.ptr - buf.buffer);
12487  midxkey->ncolumns = num_vals;
12488  midxkey->domain = NULL;
12489  midxkey->min_max_val.position = -1;
12490  midxkey->min_max_val.type = MIN_COLUMN;
12491 
12492  return midxkey;
12493 }
12494 
12495 /*
12496  * heap_attrinfo_generate_key () - Generate a key from the attribute information.
12497  * return: Pointer to DB_VALUE containing the key.
12498  * n_atts(in): Size of attribute ID array.
12499  * att_ids(in): Array of attribute ID's
12500  * attr_info(in): Pointer to attribute information structure. This
12501  * structure contains the BTID's, the attributes and their
12502  * values.
12503  * recdes(in):
12504  * db_valuep(in): Pointer to a DB_VALUE. This db_valuep will be used to
12505  * contain the set key in the case of multi-column B-trees.
12506  * It is ignored for single-column B-trees.
12507  * buf(in):
12508  *
12509  * Note: Return a key for the specified attribute ID's
12510  *
12511  * If n_atts=1, the key will be the value of that attribute
12512  * and we will return a pointer to that DB_VALUE.
12513  *
12514  * If n_atts>1, the key will be a sequence of the attribute
12515  * values. The set will be constructed and contained within
12516  * the passed DB_VALUE. A pointer to this DB_VALUE is returned.
12517  *
12518  * It is important for the caller to deallocate this memory
12519  * by calling pr_clear_value().
12520  */
12521 DB_VALUE *
12522 heap_attrinfo_generate_key (THREAD_ENTRY * thread_p, int n_atts, int *att_ids, int *atts_prefix_length,
12523  HEAP_CACHE_ATTRINFO * attr_info, RECDES * recdes, DB_VALUE * db_valuep, char *buf,
12524  FUNCTION_INDEX_INFO * func_index_info)
12525 {
12526  DB_VALUE *ret_valp;
12527  DB_VALUE *fi_res = NULL;
12528  int fi_attr_index_start = -1;
12529  int fi_col_id = -1;
12530 
12531  assert (DB_IS_NULL (db_valuep));
12532 
12533  if (func_index_info)
12534  {
12535  fi_attr_index_start = func_index_info->attr_index_start;
12536  fi_col_id = func_index_info->col_id;
12537  if (heap_eval_function_index (thread_p, func_index_info, n_atts, att_ids, attr_info, recdes, -1, db_valuep,
12538  NULL, NULL) != NO_ERROR)
12539  {
12540  return NULL;
12541  }
12542  fi_res = db_valuep;
12543  }
12544 
12545  /*
12546  * Multi-column index. The key is a sequence of the attribute values.
12547  * Return a pointer to the attributes DB_VALUE.
12548  */
12549  if ((n_atts > 1 && func_index_info == NULL) || (func_index_info && (func_index_info->attr_index_start + 1) > 1))
12550  {
12551  DB_MIDXKEY midxkey;
12552  int midxkey_size = recdes->length;
12553 
12554  if (func_index_info != NULL)
12555  {
12556  /* this will allocate more than it is needed to store the key, but there is no decent way to calculate the
12557  * correct size */
12558  midxkey_size += OR_VALUE_ALIGNED_SIZE (fi_res);
12559  }
12560 
12561  /* Allocate storage for the buf of midxkey */
12562  if (midxkey_size > DBVAL_BUFSIZE)
12563  {
12564  midxkey.buf = (char *) db_private_alloc (thread_p, midxkey_size);
12565  if (midxkey.buf == NULL)
12566  {
12567  return NULL;
12568  }
12569  }
12570  else
12571  {
12572  midxkey.buf = buf;
12573  }
12574 
12575  if (heap_midxkey_key_generate (thread_p, recdes, &midxkey, att_ids, attr_info, fi_res, fi_col_id,
12576  fi_attr_index_start) == NULL)
12577  {
12578  return NULL;
12579  }
12580 
12581  (void) pr_clear_value (db_valuep);
12582 
12583  db_make_midxkey (db_valuep, &midxkey);
12584 
12585  if (midxkey_size > DBVAL_BUFSIZE)
12586  {
12587  db_valuep->need_clear = true;
12588  }
12589 
12590  ret_valp = db_valuep;
12591  }
12592  else
12593  {
12594  /*
12595  * Single-column index. The key is simply the value of the attribute.
12596  * Return a pointer to the attributes DB_VALUE.
12597  */
12598  if (func_index_info)
12599  {
12600  ret_valp = db_valuep;
12601  return ret_valp;
12602  }
12603 
12604  ret_valp = heap_attrinfo_access (att_ids[0], attr_info);
12605  if (ret_valp != NULL && atts_prefix_length && n_atts == 1)
12606  {
12607  if (*atts_prefix_length != -1 && QSTR_IS_ANY_CHAR_OR_BIT (DB_VALUE_DOMAIN_TYPE (ret_valp)))
12608  {
12609  /* prefix index */
12610  pr_clone_value (ret_valp, db_valuep);
12611  db_string_truncate (db_valuep, *atts_prefix_length);
12612  ret_valp = db_valuep;
12613  }
12614  }
12615  }
12616 
12617  return ret_valp;
12618 }
12619 
12620 /*
12621  * heap_attrvalue_get_key () - Get B-tree key from attribute value(s)
12622  * return: Pointer to DB_VALUE containing the key.
12623  * btid_index(in): Index into an array of BTID's from the OR_CLASSREP
12624  * structure contained in idx_attrinfo.
12625  * idx_attrinfo(in): Pointer to attribute information structure. This
12626  * structure contains the BTID's, the attributes and their
12627  * values.
12628  * recdes(in):
12629  * btid(out): Pointer to a BTID. The value of the current BTID
12630  * will be returned.
12631  * db_value(in): Pointer to a DB_VALUE. This db_value will be used to
12632  * contain the set key in the case of multi-column B-trees.
12633  * It is ignored for single-column B-trees.
12634  * buf(in):
12635  * func_preds(in): cached function index expressions
12636  * key_domain(out): domain of key
12637  *
12638  * Note: Return a B-tree key for the specified B-tree ID.
12639  *
12640  * If the specified B-tree ID is associated with a single
12641  * attribute the key will be the value of that attribute
12642  * and we will return a pointer to that DB_VALUE.
12643  *
12644  * If the BTID is associated with multiple attributes the
12645  * key will be a set containing the values of the attributes.
12646  * The set will be constructed and contained within the
12647  * passed DB_VALUE. A pointer to this DB_VALUE is returned.
12648  * It is important for the caller to deallocate this memory
12649  * by calling pr_clear_value().
12650  */
12651 DB_VALUE *
12652 heap_attrvalue_get_key (THREAD_ENTRY * thread_p, int btid_index, HEAP_CACHE_ATTRINFO * idx_attrinfo, RECDES * recdes,
12653  BTID * btid, DB_VALUE * db_value, char *buf, FUNC_PRED_UNPACK_INFO * func_indx_pred,
12654  TP_DOMAIN ** key_domain)
12655 {
12656  OR_INDEX *index;
12657  int n_atts, reprid;
12658  DB_VALUE *ret_val = NULL;
12659  DB_VALUE *fi_res = NULL;
12660  TP_DOMAIN *fi_domain = NULL;
12661 
12663 
12664  /*
12665  * check to make sure the idx_attrinfo has been used, it should
12666  * never be empty.
12667  */
12668  if ((idx_attrinfo->num_values == -1) || (btid_index >= idx_attrinfo->last_classrepr->n_indexes))
12669  {
12670  return NULL;
12671  }
12672 
12673  /*
12674  * Make sure that we have the needed cached representation.
12675  */
12676  if (recdes != NULL)
12677  {
12678  reprid = or_rep_id (recdes);
12679 
12680  if (idx_attrinfo->read_classrepr == NULL || idx_attrinfo->read_classrepr->id != reprid)
12681  {
12682  /* Get the needed representation */
12683  if (heap_attrinfo_recache (thread_p, reprid, idx_attrinfo) != NO_ERROR)
12684  {
12685  return NULL;
12686  }
12687  }
12688  }
12689 
12690  index = &(idx_attrinfo->last_classrepr->indexes[btid_index]);
12691  n_atts = index->n_atts;
12692  *btid = index->btid;
12693 
12694  /* is function index */
12695  if (index->func_index_info)
12696  {
12697  if (heap_eval_function_index (thread_p, NULL, -1, NULL, idx_attrinfo, recdes, btid_index, db_value,
12698  func_indx_pred, &fi_domain) != NO_ERROR)
12699  {
12700  return NULL;
12701  }
12702  fi_res = db_value;
12703  }
12704 
12705  /*
12706  * Multi-column index. Construct the key as a sequence of attribute
12707  * values. The sequence is contained in the passed DB_VALUE. A
12708  * pointer to this DB_VALUE is returned.
12709  */
12710  if ((n_atts > 1 && recdes != NULL && index->func_index_info == NULL)
12711  || (index->func_index_info && (index->func_index_info->attr_index_start + 1) > 1))
12712  {
12713  DB_MIDXKEY midxkey;
12714  int midxkey_size = recdes->length;
12715 
12716  if (index->func_index_info != NULL)
12717  {
12718  /* this will allocate more than it is needed to store the key, but there is no decent way to calculate the
12719  * correct size */
12720  midxkey_size += OR_VALUE_ALIGNED_SIZE (fi_res);
12721  }
12722 
12723  /* Allocate storage for the buf of midxkey */
12724  if (midxkey_size > DBVAL_BUFSIZE)
12725  {
12726  midxkey.buf = (char *) db_private_alloc (thread_p, midxkey_size);
12727  if (midxkey.buf == NULL)
12728  {
12729  return NULL;
12730  }
12731  }
12732  else
12733  {
12734  midxkey.buf = buf;
12735  }
12736 
12737  midxkey.min_max_val.position = -1;
12738 
12739  if (heap_midxkey_key_get (recdes, &midxkey, index, idx_attrinfo, fi_res, fi_domain, key_domain) == NULL)
12740  {
12741  return NULL;
12742  }
12743 
12744  (void) pr_clear_value (db_value);
12745 
12746  db_make_midxkey (db_value, &midxkey);
12747 
12748  if (midxkey_size > DBVAL_BUFSIZE)
12749  {
12750  db_value->need_clear = true;
12751  }
12752 
12753  ret_val = db_value;
12754  }
12755  else
12756  {
12757  /*
12758  * Single-column index. The key is simply the value of the attribute.
12759  * Return a pointer to the attributes DB_VALUE.
12760  */
12761 
12762  /* Find the matching attribute identified by the attribute ID */
12763  if (fi_res)
12764  {
12765  ret_val = fi_res;
12766  if (key_domain != NULL)
12767  {
12768  assert (fi_domain != NULL);
12769  *key_domain = tp_domain_cache (fi_domain);
12770  }
12771  return ret_val;
12772  }
12773  ret_val = heap_attrinfo_access (index->atts[0]->id, idx_attrinfo);
12774 
12775  if (ret_val != NULL && index->attrs_prefix_length != NULL && index->attrs_prefix_length[0] != -1)
12776  {
12778  {
12779  pr_clone_value (ret_val, db_value);
12781  ret_val = db_value;
12782  }
12783  }
12784 
12785  if (key_domain != NULL)
12786  {
12787  if (index->attrs_prefix_length != NULL && index->attrs_prefix_length[0] != -1)
12788  {
12789  TP_DOMAIN *attr_dom;
12790  TP_DOMAIN *prefix_dom;
12791  DB_TYPE attr_type;
12792 
12793  attr_type = TP_DOMAIN_TYPE (index->atts[0]->domain);
12794 
12795  assert (QSTR_IS_ANY_CHAR_OR_BIT (attr_type));
12796 
12797  attr_dom = index->atts[0]->domain;
12798 
12799  prefix_dom =
12800  tp_domain_find_charbit (attr_type, TP_DOMAIN_CODESET (attr_dom), TP_DOMAIN_COLLATION (attr_dom),
12801  TP_DOMAIN_COLLATION_FLAG (attr_dom), attr_dom->precision, attr_dom->is_desc);
12802 
12803  if (prefix_dom == NULL)
12804  {
12805  prefix_dom = tp_domain_construct (attr_type, NULL, index->attrs_prefix_length[0], 0, NULL);
12806  if (prefix_dom != NULL)
12807  {
12808  prefix_dom->codeset = TP_DOMAIN_CODESET (attr_dom);
12809  prefix_dom->collation_id = TP_DOMAIN_COLLATION (attr_dom);
12810  prefix_dom->collation_flag = TP_DOMAIN_COLLATION_FLAG (attr_dom);
12811  prefix_dom->is_desc = attr_dom->is_desc;
12812  }
12813  }
12814 
12815  if (prefix_dom == NULL)
12816  {
12817  return NULL;
12818  }
12819  else
12820  {
12821  *key_domain = tp_domain_cache (prefix_dom);
12822  }
12823  }
12824  else
12825  {
12826  *key_domain = tp_domain_cache (index->atts[0]->domain);
12827  }
12828  }
12829  }
12830 
12831  return ret_val;
12832 }
12833 
12834 /*
12835  * heap_indexinfo_get_btid () -
12836  * return:
12837  * btid_index(in):
12838  * attrinfo(in):
12839  */
12840 BTID *
12841 heap_indexinfo_get_btid (int btid_index, HEAP_CACHE_ATTRINFO * attrinfo)
12842 {
12843  if (btid_index != -1 && btid_index < attrinfo->last_classrepr->n_indexes)
12844  {
12845  return &(attrinfo->last_classrepr->indexes[btid_index].btid);
12846  }
12847  else
12848  {
12849  return NULL;
12850  }
12851 }
12852 
12853 /*
12854  * heap_indexinfo_get_num_attrs () -
12855  * return:
12856  * btid_index(in):
12857  * attrinfo(in):
12858  */
12859 int
12861 {
12862  if (btid_index != -1 && btid_index < attrinfo->last_classrepr->n_indexes)
12863  {
12864  return attrinfo->last_classrepr->indexes[btid_index].n_atts;
12865  }
12866  else
12867  {
12868  return 0;
12869  }
12870 }
12871 
12872 /*
12873  * heap_indexinfo_get_attrids () -
12874  * return: NO_ERROR
12875  * btid_index(in):
12876  * attrinfo(in):
12877  * attrids(in):
12878  */
12879 int
12880 heap_indexinfo_get_attrids (int btid_index, HEAP_CACHE_ATTRINFO * attrinfo, ATTR_ID * attrids)
12881 {
12882  int i;
12883  int ret = NO_ERROR;
12884 
12885  if (btid_index != -1 && (btid_index < attrinfo->last_classrepr->n_indexes))
12886  {
12887  for (i = 0; i < attrinfo->last_classrepr->indexes[btid_index].n_atts; i++)
12888  {
12889  attrids[i] = attrinfo->last_classrepr->indexes[btid_index].atts[i]->id;
12890  }
12891  }
12892 
12893  return ret;
12894 }
12895 
12896 /*
12897  * heap_indexinfo_get_attrs_prefix_length () -
12898  * return: NO_ERROR
12899  * btid_index(in):
12900  * attrinfo(in):
12901  * keys_prefix_length(in/out):
12902  */
12903 int
12904 heap_indexinfo_get_attrs_prefix_length (int btid_index, HEAP_CACHE_ATTRINFO * attrinfo, int *attrs_prefix_length,
12905  int len_attrs_prefix_length)
12906 {
12907  int i, length = -1;
12908  int ret = NO_ERROR;
12909 
12910  if (attrs_prefix_length && len_attrs_prefix_length > 0)
12911  {
12912  for (i = 0; i < len_attrs_prefix_length; i++)
12913  {
12914  attrs_prefix_length[i] = -1;
12915  }
12916  }
12917 
12918  if (btid_index != -1 && (btid_index < attrinfo->last_classrepr->n_indexes))
12919  {
12920  if (attrinfo->last_classrepr->indexes[btid_index].attrs_prefix_length && attrs_prefix_length)
12921  {
12922  length = MIN (attrinfo->last_classrepr->indexes[btid_index].n_atts, len_attrs_prefix_length);
12923  for (i = 0; i < length; i++)
12924  {
12925  attrs_prefix_length[i] = attrinfo->last_classrepr->indexes[btid_index].attrs_prefix_length[i];
12926  }
12927  }
12928  }
12929 
12930  return ret;
12931 }
12932 
12933 /*
12934  * heap_get_index_with_name () - get BTID of index with name index_name
12935  * return : error code or NO_ERROR
12936  * thread_p (in) :
12937  * class_oid (in) : class OID
12938  * index_name (in): index name
12939  * btid (in/out) : btid
12940  */
12941 int
12942 heap_get_index_with_name (THREAD_ENTRY * thread_p, OID * class_oid, const char *index_name, BTID * btid)
12943 {
12944  OR_CLASSREP *classrep = NULL;
12945  int idx_in_cache, i;
12946  int error = NO_ERROR;
12947 
12948  BTID_SET_NULL (btid);
12949 
12950  /* get the class representation so that we can access the indexes */
12951  classrep = heap_classrepr_get (thread_p, class_oid, NULL, NULL_REPRID, &idx_in_cache);
12952  if (classrep == NULL)
12953  {
12954  return ER_FAILED;
12955  }
12956 
12957  for (i = 0; i < classrep->n_indexes; i++)
12958  {
12959  if (strcasecmp (classrep->indexes[i].btname, index_name) == 0)
12960  {
12961  BTID_COPY (btid, &classrep->indexes[i].btid);
12962  break;
12963  }
12964  }
12965  if (classrep != NULL)
12966  {
12967  heap_classrepr_free_and_init (classrep, &idx_in_cache);
12968  }
12969 
12970  return error;
12971 }
12972 
12973 /*
12974  * heap_get_indexinfo_of_btid () -
12975  * return: NO_ERROR
12976  * class_oid(in):
12977  * btid(in):
12978  * type(in):
12979  * num_attrs(in):
12980  * attr_ids(in):
12981  * btnamepp(in);
12982  */
12983 int
12984 heap_get_indexinfo_of_btid (THREAD_ENTRY * thread_p, const OID * class_oid, const BTID * btid, BTREE_TYPE * type,
12985  int *num_attrs, ATTR_ID ** attr_ids, int **attrs_prefix_length, char **btnamepp,
12986  int *func_index_col_id)
12987 {
12988  OR_CLASSREP *classrepp;
12989  OR_INDEX *indexp;
12990  int idx_in_cache, i, n = 0;
12991  int idx;
12992  int ret = NO_ERROR;
12993 
12994  /* initial value of output parameters */
12995  if (num_attrs)
12996  {
12997  *num_attrs = 0;
12998  }
12999 
13000  if (attr_ids)
13001  {
13002  *attr_ids = NULL;
13003  }
13004 
13005  if (btnamepp)
13006  {
13007  *btnamepp = NULL;
13008  }
13009 
13010  if (attrs_prefix_length)
13011  {
13012  *attrs_prefix_length = NULL;
13013  }
13014 
13015  if (func_index_col_id)
13016  {
13017  *func_index_col_id = -1;
13018  }
13019 
13020  /* get the class representation so that we can access the indexes */
13021  classrepp = heap_classrepr_get (thread_p, class_oid, NULL, NULL_REPRID, &idx_in_cache);
13022  if (classrepp == NULL)
13023  {
13024  goto exit_on_error;
13025  }
13026 
13027  /* get the idx of the index which corresponds to the BTID */
13028  idx = heap_classrepr_find_index_id (classrepp, btid);
13029  if (idx < 0)
13030  {
13031  goto exit_on_error;
13032  }
13033  indexp = &classrepp->indexes[idx];
13034 
13035  /* get the type of this index */
13036  if (type)
13037  {
13038  *type = indexp->type;
13039  }
13040 
13041  /* get the number of attributes associated with this index */
13042  if (num_attrs)
13043  {
13044  *num_attrs = n = indexp->n_atts;
13045  }
13046  /* allocate a new attribute ID array */
13047  if (attr_ids)
13048  {
13049  *attr_ids = (ATTR_ID *) db_private_alloc (thread_p, n * sizeof (ATTR_ID));
13050 
13051  if (*attr_ids == NULL)
13052  {
13053  goto exit_on_error;
13054  }
13055 
13056  /* fill the array with the attribute ID's */
13057  for (i = 0; i < n; i++)
13058  {
13059  (*attr_ids)[i] = indexp->atts[i]->id;
13060  }
13061  }
13062 
13063  if (btnamepp)
13064  {
13065  *btnamepp = strdup (indexp->btname);
13066  }
13067 
13068  if (attrs_prefix_length && indexp->type == BTREE_INDEX)
13069  {
13070  *attrs_prefix_length = (int *) db_private_alloc (thread_p, n * sizeof (int));
13071 
13072  if (*attrs_prefix_length == NULL)
13073  {
13074  goto exit_on_error;
13075  }
13076 
13077  for (i = 0; i < n; i++)
13078  {
13079  if (indexp->attrs_prefix_length != NULL)
13080  {
13081  (*attrs_prefix_length)[i] = indexp->attrs_prefix_length[i];
13082  }
13083  else
13084  {
13085  (*attrs_prefix_length)[i] = -1;
13086  }
13087  }
13088  }
13089 
13090  if (func_index_col_id && indexp->func_index_info)
13091  {
13092  *func_index_col_id = indexp->func_index_info->col_id;
13093  }
13094 
13095  /* free the class representation */
13096  heap_classrepr_free_and_init (classrepp, &idx_in_cache);
13097 
13098  return ret;
13099 
13100 exit_on_error:
13101 
13102  if (attr_ids && *attr_ids)
13103  {
13104  db_private_free_and_init (thread_p, *attr_ids);
13105  }
13106 
13107  if (btnamepp && *btnamepp)
13108  {
13109  free_and_init (*btnamepp);
13110  }
13111 
13112  if (attrs_prefix_length)
13113  {
13114  if (*attrs_prefix_length)
13115  {
13116  db_private_free_and_init (thread_p, *attrs_prefix_length);
13117  }
13118  *attrs_prefix_length = NULL;
13119  }
13120 
13121  if (classrepp)
13122  {
13123  heap_classrepr_free_and_init (classrepp, &idx_in_cache);
13124  }
13125 
13126  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
13127 }
13128 
13129 /*
13130  * heap_get_referenced_by () - Find objects referenced by given object
13131  * return: int (object count or -1)
13132  * class_oid(in):
13133  * obj_oid(in): The object identifier
13134  * recdes(in): Object disk representation
13135  * max_oid_cnt(in/out): Size of OID list in OIDs
13136  * oid_list(in): Set to the array of referenced OIDs
13137  * (This area can be realloc, thus, it should have been
13138  * with malloc)
13139  *
13140  * Note: This function finds object identifiers referenced by the
13141  * given instance. If OID references are stored in the given
13142  * OID list. If the oid_list is not large enough to hold the
13143  * number of instances, the area (i.e., oid_list) is expanded
13144  * using realloc. The number of OID references is returned by the
13145  * function.
13146  *
13147  * Note: The oid_list pointer should be freed by the caller.
13148  * Note: Nested-sets, that is, set-of-sets inside the object are not traced.
13149  * Note: This function does not remove duplicate oids from the list, the
13150  * caller is responsible for checking and removing them if needed.
13151  */
13152 int
13153 heap_get_referenced_by (THREAD_ENTRY * thread_p, OID * class_oid, const OID * obj_oid, RECDES * recdes,
13154  int *max_oid_cnt, OID ** oid_list)
13155 {
13156  HEAP_CACHE_ATTRINFO attr_info;
13157  DB_TYPE dbtype;
13158  HEAP_ATTRVALUE *value; /* Disk value Attr info for a particular attr */
13160  DB_SET *set;
13161  OID *oid_ptr; /* iterator on oid_list */
13162  OID *attr_oid;
13163  int oid_cnt; /* number of OIDs fetched */
13164  int cnt; /* set element count */
13165  int new_max_oid;
13166  int i, j; /* loop counters */
13167 
13168  /*
13169  * We don't support class references in this function
13170  */
13171  if (oid_is_root (class_oid))
13172  {
13173  return 0;
13174  }
13175 
13176  if ((heap_attrinfo_start_refoids (thread_p, class_oid, &attr_info) != NO_ERROR)
13177  || heap_attrinfo_read_dbvalues (thread_p, obj_oid, recdes, NULL, &attr_info) != NO_ERROR)
13178  {
13179  goto error;
13180  }
13181 
13182  if (*oid_list == NULL)
13183  {
13184  *max_oid_cnt = 0;
13185  }
13186  else if (*max_oid_cnt <= 0)
13187  {
13188  /*
13189  * We better release oid_list since we do not know it size. This may
13190  * be a bug.
13191  */
13192  free_and_init (*oid_list);
13193  *max_oid_cnt = 0;
13194  }
13195 
13196  /*
13197  * Now start searching the attributes that may reference objects
13198  */
13199  oid_cnt = 0;
13200  oid_ptr = *oid_list;
13201 
13202  for (i = 0; i < attr_info.num_values; i++)
13203  {
13204  value = &attr_info.values[i];
13205  dbtype = db_value_type (&value->dbvalue);
13206  if (dbtype == DB_TYPE_OID && !db_value_is_null (&value->dbvalue)
13207  && (attr_oid = db_get_oid (&value->dbvalue)) != NULL && !OID_ISNULL (attr_oid))
13208  {
13209  /*
13210  * A simple attribute with reference an object (OID)
13211  */
13212  if (oid_cnt == *max_oid_cnt)
13213  {
13214  /*
13215  * We need to expand the area to deposit more OIDs.
13216  * Use 50% of the current size for expansion and at least 10 OIDs
13217  */
13218  if (*max_oid_cnt <= 0)
13219  {
13220  *max_oid_cnt = 0;
13221  new_max_oid = attr_info.num_values;
13222  }
13223  else
13224  {
13225  new_max_oid = (int) (*max_oid_cnt * 1.5) + 1;
13226  if (new_max_oid < attr_info.num_values)
13227  {
13228  new_max_oid = attr_info.num_values;
13229  }
13230  }
13231 
13232  if (new_max_oid < 10)
13233  {
13234  new_max_oid = 10;
13235  }
13236 
13237  oid_ptr = (OID *) realloc (*oid_list, new_max_oid * sizeof (OID));
13238  if (oid_ptr == NULL)
13239  {
13240  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, new_max_oid * sizeof (OID));
13241  goto error;
13242  }
13243 
13244  /*
13245  * Set the pointers and advance to current area pointer
13246  */
13247  *oid_list = oid_ptr;
13248  oid_ptr += *max_oid_cnt;
13249  *max_oid_cnt = new_max_oid;
13250  }
13251  *oid_ptr = *attr_oid;
13252  oid_ptr++;
13253  oid_cnt++;
13254  }
13255  else
13256  {
13257  if (TP_IS_SET_TYPE (dbtype))
13258  {
13259  /*
13260  * A set which may or may nor reference objects (OIDs)
13261  * Go through each element of the set
13262  */
13263 
13264  set = db_get_set (&value->dbvalue);
13265  cnt = db_set_size (set);
13266 
13267  for (j = 0; j < cnt; j++)
13268  {
13269  if (db_set_get (set, j, &db_value) != NO_ERROR)
13270  {
13271  goto error;
13272  }
13273 
13276  && (attr_oid = db_get_oid (&db_value)) != NULL && !OID_ISNULL (attr_oid))
13277  {
13278  if (oid_cnt == *max_oid_cnt)
13279  {
13280  /*
13281  * We need to expand the area to deposit more OIDs.
13282  * Use 50% of the current size for expansion.
13283  */
13284  if (*max_oid_cnt <= 0)
13285  {
13286  *max_oid_cnt = 0;
13287  new_max_oid = attr_info.num_values;
13288  }
13289  else
13290  {
13291  new_max_oid = (int) (*max_oid_cnt * 1.5) + 1;
13292  if (new_max_oid < attr_info.num_values)
13293  {
13294  new_max_oid = attr_info.num_values;
13295  }
13296  }
13297  if (new_max_oid < 10)
13298  {
13299  new_max_oid = 10;
13300  }
13301 
13302  oid_ptr = (OID *) realloc (*oid_list, new_max_oid * sizeof (OID));
13303  if (oid_ptr == NULL)
13304  {
13306  new_max_oid * sizeof (OID));
13307  goto error;
13308  }
13309 
13310  /*
13311  * Set the pointers and advance to current area pointer
13312  */
13313  *oid_list = oid_ptr;
13314  oid_ptr += *max_oid_cnt;
13315  *max_oid_cnt = new_max_oid;
13316  }
13317  *oid_ptr = *attr_oid;
13318  oid_ptr++;
13319  oid_cnt++;
13320  }
13321  }
13322  }
13323  }
13324  }
13325 
13326  /* free object area if no OIDs were encountered */
13327  if (oid_cnt == 0)
13328  /*
13329  * Unless we check whether *oid_list is NULL,
13330  * it may cause double-free of oid_list.
13331  */
13332  if (*oid_list != NULL)
13333  {
13334  free_and_init (*oid_list);
13335  }
13336 
13337  heap_attrinfo_end (thread_p, &attr_info);
13338 
13339  /* return number of OIDs fetched */
13340  return oid_cnt;
13341 
13342 error:
13343  /* XXXXXXX */
13344 
13345  free_and_init (*oid_list);
13346  *max_oid_cnt = 0;
13347  heap_attrinfo_end (thread_p, &attr_info);
13348 
13349  return ER_FAILED;
13350 }
13351 
13352 /*
13353  * heap_prefetch () - Prefetch objects
13354  * return: NO_ERROR
13355  * fetch_area is set to point to fetching area
13356  * class_oid(in): Class identifier for the instance oid
13357  * oid(in): Object that must be fetched if its cached state is invalid
13358  * prefetch(in): Prefetch structure
13359  *
13360  */
int
heap_prefetch (THREAD_ENTRY * thread_p, OID * class_oid, const OID * oid, LC_COPYAREA_DESC * prefetch)
{
  VPID vpid;			/* page holding the given object */
  PAGE_PTR pgptr = NULL;
  int round_length;		/* record length rounded up to HEAP_MAX_ALIGN */
  INT16 right_slotid, left_slotid;	/* scan cursors on either side of oid->slotid */
  HEAP_DIRECTION direction;	/* which side(s) of the slot are still being scanned */
  SCAN_CODE scan;
  int ret = NO_ERROR;

  /*
   * Prefetch other instances (i.e., neighbors) stored on the same page
   * of the given object OID. Relocated instances and instances in overflow are
   * not prefetched, nor instances that do not belong to the given class.
   * Prefetching stop once an error, such as out of space, is encountered.
   */

  vpid.volid = oid->volid;
  vpid.pageid = oid->pageid;

  pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, S_LOCK, NULL, NULL);
  if (pgptr == NULL)
    {
      assert (er_errid () != NO_ERROR);
      ret = er_errid ();
      if (ret == ER_PB_BAD_PAGEID)
	{
	  /* map a bad page id onto the more specific "unknown object" error */
	  ret = ER_HEAP_UNKNOWN_OBJECT;
	  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ret, 3, oid->volid, oid->pageid, oid->slotid);
	}

      /*
       * Problems getting the page.. forget about prefetching...
       */
      return ret;
    }

  /* scan outward from the given slot in both directions until each side is exhausted */
  right_slotid = oid->slotid;
  left_slotid = oid->slotid;
  direction = HEAP_DIRECTION_BOTH;

  while (direction != HEAP_DIRECTION_NONE)
    {
      /*
       * Don't include the desired object again, forwarded instances, nor
       * instances that belong to other classes
       */

      /* Check to the right */
      if (direction == HEAP_DIRECTION_RIGHT || direction == HEAP_DIRECTION_BOTH)
	{
	  scan = spage_next_record (pgptr, &right_slotid, prefetch->recdes, COPY);
	  if (scan == S_SUCCESS && spage_get_record_type (pgptr, right_slotid) == REC_HOME)
	    {
	      /* append the copied record to the copy area and advance the descriptor cursor */
	      prefetch->mobjs->num_objs++;
	      COPY_OID (&((*prefetch->obj)->class_oid), class_oid);
	      (*prefetch->obj)->oid.volid = oid->volid;
	      (*prefetch->obj)->oid.pageid = oid->pageid;
	      (*prefetch->obj)->oid.slotid = right_slotid;
	      (*prefetch->obj)->length = prefetch->recdes->length;
	      (*prefetch->obj)->offset = *prefetch->offset;
	      (*prefetch->obj)->operation = LC_FETCH;
	      (*prefetch->obj) = LC_NEXT_ONEOBJ_PTR_IN_COPYAREA (*prefetch->obj);
	      round_length = DB_ALIGN (prefetch->recdes->length, HEAP_MAX_ALIGN);
	      *prefetch->offset += round_length;
	      prefetch->recdes->data += round_length;
	      prefetch->recdes->area_size -= (round_length + sizeof (*(*prefetch->obj)));
	    }
	  else if (scan != S_SUCCESS)
	    {
	      /* Stop prefetching objects from the right */
	      direction = ((direction == HEAP_DIRECTION_BOTH) ? HEAP_DIRECTION_LEFT : HEAP_DIRECTION_NONE);
	    }
	}

      /* Check to the left */
      if (direction == HEAP_DIRECTION_LEFT || direction == HEAP_DIRECTION_BOTH)
	{
	  scan = spage_previous_record (pgptr, &left_slotid, prefetch->recdes, COPY);
	  if (scan == S_SUCCESS && left_slotid != HEAP_HEADER_AND_CHAIN_SLOTID
	      && spage_get_record_type (pgptr, left_slotid) == REC_HOME)
	    {
	      prefetch->mobjs->num_objs++;
	      COPY_OID (&((*prefetch->obj)->class_oid), class_oid);
	      (*prefetch->obj)->oid.volid = oid->volid;
	      (*prefetch->obj)->oid.pageid = oid->pageid;
	      (*prefetch->obj)->oid.slotid = left_slotid;
	      (*prefetch->obj)->length = prefetch->recdes->length;
	      (*prefetch->obj)->offset = *prefetch->offset;
	      (*prefetch->obj)->operation = LC_FETCH;
	      (*prefetch->obj) = LC_NEXT_ONEOBJ_PTR_IN_COPYAREA (*prefetch->obj);
	      round_length = DB_ALIGN (prefetch->recdes->length, HEAP_MAX_ALIGN);
	      *prefetch->offset += round_length;
	      prefetch->recdes->data += round_length;
	      prefetch->recdes->area_size -= (round_length + sizeof (*(*prefetch->obj)));
	    }
	  else if (scan != S_SUCCESS)
	    {
	      /* Stop prefetching objects from the left */
	      direction = ((direction == HEAP_DIRECTION_BOTH) ? HEAP_DIRECTION_RIGHT : HEAP_DIRECTION_NONE);
	    }
	}
    }

  pgbuf_unfix_and_init (thread_p, pgptr);

  return ret;
}
13470 
13471 static DISK_ISVALID
13472 heap_check_all_pages_by_heapchain (THREAD_ENTRY * thread_p, HFID * hfid, HEAP_CHKALL_RELOCOIDS * chk_objs,
13473  INT32 * num_checked)
13474 {
13475  VPID vpid;
13476  VPID *vpidptr_ofpgptr;
13477  INT32 npages = 0;
13478  DISK_ISVALID valid_pg = DISK_VALID;
13479  bool spg_error = false;
13480  PGBUF_WATCHER pg_watcher;
13481  PGBUF_WATCHER old_pg_watcher;
13482 
13485 
13486  vpid.volid = hfid->vfid.volid;
13487  vpid.pageid = hfid->hpgid;
13488 
13489  while (!VPID_ISNULL (&vpid) && valid_pg == DISK_VALID)
13490  {
13491  npages++;
13492 
13493  valid_pg = file_check_vpid (thread_p, &hfid->vfid, &vpid);
13494  if (valid_pg != DISK_VALID)
13495  {
13496  break;
13497  }
13498 
13499  pg_watcher.pgptr =
13501  if (old_pg_watcher.pgptr != NULL)
13502  {
13503  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
13504  }
13505  if (pg_watcher.pgptr == NULL)
13506  {
13507  /* something went wrong, return */
13508  valid_pg = DISK_ERROR;
13509  break;
13510  }
13511 #ifdef SPAGE_DEBUG
13512  if (spage_check (thread_p, pg_watcher.pgptr) != NO_ERROR)
13513  {
13514  /* if spage has an error, try to go on. but, this page is corrupted. */
13515  spg_error = true;
13516  }
13517 #endif
13518 
13519  if (heap_vpid_next (thread_p, hfid, pg_watcher.pgptr, &vpid) != NO_ERROR)
13520  {
13521  pgbuf_ordered_unfix (thread_p, &pg_watcher);
13522  /* something went wrong, return */
13523  valid_pg = DISK_ERROR;
13524  break;
13525  }
13526 
13527  vpidptr_ofpgptr = pgbuf_get_vpid_ptr (pg_watcher.pgptr);
13528  if (VPID_EQ (&vpid, vpidptr_ofpgptr))
13529  {
13531  hfid->vfid.fileid, hfid->hpgid);
13532  VPID_SET_NULL (&vpid);
13533  valid_pg = DISK_ERROR;
13534  }
13535 
13536  if (chk_objs != NULL)
13537  {
13538  valid_pg = heap_chkreloc_next (thread_p, chk_objs, pg_watcher.pgptr);
13539  }
13540 
13541  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
13542  }
13543 
13544  if (old_pg_watcher.pgptr != NULL)
13545  {
13546  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
13547  }
13548  assert (pg_watcher.pgptr == NULL);
13549 
13550  *num_checked = npages;
13551  return (spg_error == true) ? DISK_ERROR : valid_pg;
13552 }
13553 
13554 #if defined (SA_MODE)
13555 /*
13556  * heap_file_map_chkreloc () - FILE_MAP_PAGE_FUNC to check relocations.
13557  *
13558  * return : error code
13559  * thread_p (in) : thread entry
13560  * page (in) : heap page pointer
13561  * stop (in) : not used
13562  * args (in) : HEAP_CHKALL_RELOCOIDS *
13563  */
13564 static int
13565 heap_file_map_chkreloc (THREAD_ENTRY * thread_p, PAGE_PTR * page, bool * stop, void *args)
13566 {
13568 
13569  DISK_ISVALID valid = DISK_VALID;
13570  int error_code = NO_ERROR;
13571 
13572  valid = heap_chkreloc_next (thread_p, chk_objs, *page);
13573  if (valid == DISK_INVALID)
13574  {
13575  assert_release (false);
13576  return ER_FAILED;
13577  }
13578  else if (valid == DISK_ERROR)
13579  {
13580  ASSERT_ERROR_AND_SET (error_code);
13581  return error_code;
13582  }
13583  return NO_ERROR;
13584 }
13585 
13586 /*
13587  * heap_check_all_pages_by_file_table () - check relocations using file table
13588  *
13589  * return : DISK_INVALID for unexpected errors, DISK_ERROR for expected errors, DISK_VALID for successful check
13590  * thread_p (in) : thread entry
13591  * hfid (in) : heap file identifier
13592  * chk_objs (in) : check relocation context
13593  */
13594 static DISK_ISVALID
13595 heap_check_all_pages_by_file_table (THREAD_ENTRY * thread_p, HFID * hfid, HEAP_CHKALL_RELOCOIDS * chk_objs)
13596 {
13597  int error_code = NO_ERROR;
13598 
13599  error_code =
13600  file_map_pages (thread_p, &hfid->vfid, PGBUF_LATCH_READ, PGBUF_UNCONDITIONAL_LATCH, heap_file_map_chkreloc,
13601  chk_objs);
13602  if (error_code == ER_FAILED)
13603  {
13604  assert_release (false);
13605  return DISK_INVALID;
13606  }
13607  else if (error_code != NO_ERROR)
13608  {
13609  ASSERT_ERROR ();
13610  return DISK_ERROR;
13611  }
13612  return DISK_VALID;
13613 }
13614 #endif /* SA_MODE */
13615 
13616 /*
13617  * heap_check_all_pages () - Validate all pages known by given heap vs file manger
13618  * return: DISK_INVALID, DISK_VALID, DISK_ERROR
13619  * hfid(in): : Heap identifier
13620  *
13621  * Note: Verify that all pages known by the given heap are valid. That
13622  * is, that they are valid from the point of view of the file manager.
13623  */
13626 {
13627  VPID vpid; /* Page-volume identifier */
13628  PAGE_PTR pgptr = NULL; /* Page pointer */
13629  HEAP_HDR_STATS *heap_hdr; /* Header of heap structure */
13630  RECDES hdr_recdes; /* Header record descriptor */
13631  DISK_ISVALID valid_pg = DISK_VALID;
13632  DISK_ISVALID valid = DISK_VALID;
13633  INT32 npages = 0;
13634  int i;
13636  HEAP_CHKALL_RELOCOIDS *chk_objs = &chk;
13637 #if defined (SA_MODE)
13638  int file_numpages;
13639 #endif /* SA_MODE */
13640 
13641  valid_pg = heap_chkreloc_start (chk_objs);
13642  if (valid_pg != DISK_VALID)
13643  {
13644  chk_objs = NULL;
13645  }
13646  else
13647  {
13648  chk_objs->verify_not_vacuumed = true;
13649  }
13650 
13651  /* Scan every page of the heap to find out if they are valid */
13652  valid_pg = heap_check_all_pages_by_heapchain (thread_p, hfid, chk_objs, &npages);
13653 
13654 #if defined (SA_MODE)
13655  if (file_get_num_user_pages (thread_p, &hfid->vfid, &file_numpages) != NO_ERROR)
13656  {
13657  ASSERT_ERROR ();
13658  return valid_pg == DISK_VALID ? DISK_ERROR : valid_pg;
13659  }
13660  if (file_numpages != -1 && file_numpages != npages)
13661  {
13662  DISK_ISVALID tmp_valid_pg = DISK_VALID;
13663 
13664  assert (false);
13665  if (chk_objs != NULL)
13666  {
13667  chk_objs->verify = false;
13668  (void) heap_chkreloc_end (chk_objs);
13669 
13670  tmp_valid_pg = heap_chkreloc_start (chk_objs);
13671  }
13672 
13673  /*
13674  * Scan every page of the heap using allocset.
13675  * This is for getting more information of the corrupted pages.
13676  */
13677  tmp_valid_pg = heap_check_all_pages_by_file_table (thread_p, hfid, chk_objs);
13678 
13679  if (chk_objs != NULL)
13680  {
13681  if (tmp_valid_pg == DISK_VALID)
13682  {
13683  tmp_valid_pg = heap_chkreloc_end (chk_objs);
13684  }
13685  else
13686  {
13687  chk_objs->verify = false;
13688  (void) heap_chkreloc_end (chk_objs);
13689  }
13690  }
13691 
13692  if (npages != file_numpages)
13693  {
13695  hfid->hpgid, npages, file_numpages);
13696  valid_pg = DISK_INVALID;
13697  }
13698  if (valid_pg == DISK_VALID && tmp_valid_pg != DISK_VALID)
13699  {
13700  valid_pg = tmp_valid_pg;
13701  }
13702  }
13703  else
13704 #endif /* SA_MODE */
13705  {
13706  if (chk_objs != NULL)
13707  {
13708  valid_pg = heap_chkreloc_end (chk_objs);
13709  }
13710  }
13711 
13712  if (valid_pg == DISK_VALID)
13713  {
13714  /*
13715  * Check the statistics entries in the header
13716  */
13717 
13718  /* Fetch the header page of the heap file */
13719  vpid.volid = hfid->vfid.volid;
13720  vpid.pageid = hfid->hpgid;
13721 
13722  pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, S_LOCK, NULL, NULL);
13723  if (pgptr == NULL)
13724  {
13725  return DISK_ERROR;
13726  }
13727 
13728  (void) pgbuf_check_page_ptype (thread_p, pgptr, PAGE_HEAP);
13729 
13730  if (spage_get_record (thread_p, pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, PEEK) != S_SUCCESS)
13731  {
13732  /* Unable to peek heap header record */
13733  pgbuf_unfix_and_init (thread_p, pgptr);
13734 
13735  return DISK_ERROR;
13736  }
13737 
13738  heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data;
13739  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS && valid_pg != DISK_ERROR; i++)
13740  {
13741  if (!VPID_ISNULL (&heap_hdr->estimates.best[i].vpid))
13742  {
13743  valid = file_check_vpid (thread_p, &hfid->vfid, &heap_hdr->estimates.best[i].vpid);
13744  if (valid != DISK_VALID)
13745  {
13746  valid_pg = valid;
13747  break;
13748  }
13749  }
13750  }
13751 
13752 #if defined(SA_MODE)
13754  {
13755  HEAP_STATS_ENTRY *ent;
13756  void *last;
13757  int rc;
13758 
13759  rc = pthread_mutex_lock (&heap_Bestspace->bestspace_mutex);
13760 
13761  last = NULL;
13762  while ((ent = (HEAP_STATS_ENTRY *) mht_get2 (heap_Bestspace->hfid_ht, hfid, &last)) != NULL)
13763  {
13764  assert_release (!VPID_ISNULL (&ent->best.vpid));
13765  if (!VPID_ISNULL (&ent->best.vpid))
13766  {
13767  valid_pg = file_check_vpid (thread_p, &hfid->vfid, &ent->best.vpid);
13768  if (valid_pg != DISK_VALID)
13769  {
13770  break;
13771  }
13772  }
13773  assert_release (ent->best.freespace > 0);
13774  }
13775 
13776  assert (mht_count (heap_Bestspace->vpid_ht) == mht_count (heap_Bestspace->hfid_ht));
13777 
13778  pthread_mutex_unlock (&heap_Bestspace->bestspace_mutex);
13779  }
13780 #endif
13781 
13782  pgbuf_unfix_and_init (thread_p, pgptr);
13783 
13784  /* Need to check for the overflow pages.... */
13785  }
13786 
13787  return valid_pg;
13788 }
13789 
13792 {
13793  FILE_TYPE file_type;
13794  VPID vpid;
13796 #if !defined (NDEBUG)
13797  FILE_DESCRIPTORS fdes;
13798 #endif /* !NDEBUG */
13799 
13800  if (file_get_type (thread_p, &hfid->vfid, &file_type) != NO_ERROR)
13801  {
13802  return DISK_ERROR;
13803  }
13804  if (file_type == FILE_UNKNOWN_TYPE || (file_type != FILE_HEAP && file_type != FILE_HEAP_REUSE_SLOTS))
13805  {
13806  assert_release (false);
13807  return DISK_INVALID;
13808  }
13809 
13810  if (heap_get_header_page (thread_p, hfid, &vpid) == NO_ERROR)
13811  {
13812  hfid->hpgid = vpid.pageid;
13813 
13814 #if !defined (NDEBUG)
13815  if (file_descriptor_get (thread_p, &hfid->vfid, &fdes) == NO_ERROR && !OID_ISNULL (&fdes.heap.class_oid))
13816  {
13818  LOG_FIND_THREAD_TRAN_INDEX (thread_p), SCH_S_LOCK) == 1);
13819  }
13820 #endif /* NDEBUG */
13821  rv = heap_check_all_pages (thread_p, hfid);
13822  if (rv == DISK_INVALID)
13823  {
13824  assert_release (false);
13825  }
13826  else if (rv == DISK_ERROR)
13827  {
13828  ASSERT_ERROR ();
13829  }
13830  return rv;
13831  }
13832  else
13833  {
13834  ASSERT_ERROR ();
13835  return DISK_ERROR;
13836  }
13837 }
13838 
13839 /*
13840  * heap_check_all_heaps () - Validate all pages of all known heap files
13841  * return: DISK_INVALID, DISK_VALID, DISK_ERROR
13842  *
13843  * Note: Verify that all pages of all heap files are valid. That is,
13844  * that they are valid from the point of view of the file manager.
13845  */
13848 {
13849  int error_code = NO_ERROR;
13850  HFID hfid;
13851  DISK_ISVALID allvalid = DISK_VALID;
13852  DISK_ISVALID valid = DISK_VALID;
13854  OID class_oid = OID_INITIALIZER;
13855 
13856  while (true)
13857  {
13858  /* Go to each file, check only the heap files */
13859  error_code = file_tracker_interruptable_iterate (thread_p, FILE_HEAP, &vfid, &class_oid);
13860  if (error_code != NO_ERROR)
13861  {
13862  ASSERT_ERROR ();
13863  goto exit_on_error;
13864  }
13865  if (VFID_ISNULL (&vfid))
13866  {
13867  /* no more heap files */
13868  break;
13869  }
13870 
13871  hfid.vfid = vfid;
13872  valid = heap_check_heap_file (thread_p, &hfid);
13873  if (valid == DISK_ERROR)
13874  {
13875  goto exit_on_error;
13876  }
13877  if (valid != DISK_VALID)
13878  {
13879  allvalid = valid;
13880  }
13881  }
13882  assert (OID_ISNULL (&class_oid));
13883 
13884  return allvalid;
13885 
13886 exit_on_error:
13887  if (!OID_ISNULL (&class_oid))
13888  {
13889  lock_unlock_object (thread_p, &class_oid, oid_Root_class_oid, SCH_S_LOCK, true);
13890  }
13891 
13892  return ((allvalid == DISK_VALID) ? DISK_ERROR : allvalid);
13893 }
13894 
13895 /*
13896  * heap_dump_hdr () - Dump heap file header
13897  * return: NO_ERROR
13898  * heap_hdr(in): Header structure
13899  */
13900 static int
13901 heap_dump_hdr (FILE * fp, HEAP_HDR_STATS * heap_hdr)
13902 {
13903  int i, j;
13904  int avg_length;
13905  int ret = NO_ERROR;
13906 
13907  avg_length = ((heap_hdr->estimates.num_recs > 0)
13908  ? (int) ((heap_hdr->estimates.recs_sumlen / (float) heap_hdr->estimates.num_recs) + 0.9) : 0);
13909 
13910  fprintf (fp, "CLASS_OID = %2d|%4d|%2d, ", heap_hdr->class_oid.volid, heap_hdr->class_oid.pageid,
13911  heap_hdr->class_oid.slotid);
13912  fprintf (fp, "OVF_VFID = %4d|%4d, NEXT_VPID = %4d|%4d\n", heap_hdr->ovf_vfid.volid, heap_hdr->ovf_vfid.fileid,
13913  heap_hdr->next_vpid.volid, heap_hdr->next_vpid.pageid);
13914  fprintf (fp, "unfill_space = %4d\n", heap_hdr->unfill_space);
13915  fprintf (fp, "Estimated: num_pages = %d, num_recs = %d, avg reclength = %d\n", heap_hdr->estimates.num_pages,
13916  heap_hdr->estimates.num_recs, avg_length);
13917  fprintf (fp, "Estimated: num high best = %d, num others(not in array) high best = %d\n",
13918  heap_hdr->estimates.num_high_best, heap_hdr->estimates.num_other_high_best);
13919  fprintf (fp, "Hint of best set of vpids with head = %d\n", heap_hdr->estimates.head);
13920 
13921  for (j = 0, i = 0; i < HEAP_NUM_BEST_SPACESTATS; j++, i++)
13922  {
13923  if (j != 0 && j % 5 == 0)
13924  {
13925  fprintf (fp, "\n");
13926  }
13927  fprintf (fp, "%4d|%4d %4d,", heap_hdr->estimates.best[i].vpid.volid, heap_hdr->estimates.best[i].vpid.pageid,
13928  heap_hdr->estimates.best[i].freespace);
13929  }
13930  fprintf (fp, "\n");
13931 
13932  fprintf (fp, "Second best: num hints = %d, head of hints = %d, tail (next to insert) of hints = %d, num subs = %d\n",
13933  heap_hdr->estimates.num_second_best, heap_hdr->estimates.head_second_best,
13934  heap_hdr->estimates.tail_second_best, heap_hdr->estimates.num_substitutions);
13935  for (j = 0, i = 0; i < HEAP_NUM_BEST_SPACESTATS; j++, i++)
13936  {
13937  if (j != 0 && j % 5 == 0)
13938  {
13939  fprintf (fp, "\n");
13940  }
13941  fprintf (fp, "%4d|%4d,", heap_hdr->estimates.second_best[i].volid, heap_hdr->estimates.second_best[i].pageid);
13942  }
13943  fprintf (fp, "\n");
13944 
13945  fprintf (fp, "Last vpid = %4d|%4d\n", heap_hdr->estimates.last_vpid.volid, heap_hdr->estimates.last_vpid.pageid);
13946 
13947  fprintf (fp, "Next full search vpid = %4d|%4d\n", heap_hdr->estimates.full_search_vpid.volid,
13948  heap_hdr->estimates.full_search_vpid.pageid);
13949 
13950  return ret;
13951 }
13952 
13953 /*
13954  * heap_dump () - Dump heap file
13955  * return:
13956  * hfid(in): Heap file identifier
13957  * dump_records(in): If true, objects are printed in ascii format, otherwise, the
13958  * objects are not printed.
13959  *
13960  * Note: Dump a heap file. The objects are printed only when the value
13961  * of dump_records is true. This function is used for DEBUGGING PURPOSES.
13962  */
13963 void
13964 heap_dump (THREAD_ENTRY * thread_p, FILE * fp, HFID * hfid, bool dump_records)
13965 {
13966  VPID vpid; /* Page-volume identifier */
13967  HEAP_HDR_STATS *heap_hdr; /* Header of heap structure */
13968  RECDES hdr_recdes; /* Header record descriptor */
13969  VFID ovf_vfid;
13970  OID oid;
13971  HEAP_SCANCACHE scan_cache;
13972  HEAP_CACHE_ATTRINFO attr_info;
13973  RECDES peek_recdes;
13974  FILE_DESCRIPTORS fdes;
13975  int ret = NO_ERROR;
13976  PGBUF_WATCHER pg_watcher;
13977  PGBUF_WATCHER old_pg_watcher;
13978 
13981 
13982  fprintf (fp, "\n\n*** DUMPING HEAP FILE: ");
13983  fprintf (fp, "volid = %d, Fileid = %d, Header-pageid = %d ***\n", hfid->vfid.volid, hfid->vfid.fileid, hfid->hpgid);
13984  (void) file_descriptor_dump (thread_p, &hfid->vfid, fp);
13985 
13986  /* Fetch the header page of the heap file */
13987 
13988  vpid.volid = hfid->vfid.volid;
13989  vpid.pageid = hfid->hpgid;
13990  pg_watcher.pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, S_LOCK, NULL, &pg_watcher);
13991  if (pg_watcher.pgptr == NULL)
13992  {
13993  /* Unable to fetch heap header page */
13994  return;
13995  }
13996 
13997  /* Peek the header record to dump the statistics */
13998 
13999  if (spage_get_record (thread_p, pg_watcher.pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, PEEK) != S_SUCCESS)
14000  {
14001  /* Unable to peek heap header record */
14002  pgbuf_ordered_unfix (thread_p, &pg_watcher);
14003  return;
14004  }
14005 
14006  heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data;
14007  ret = heap_dump_hdr (fp, heap_hdr);
14008  if (ret != NO_ERROR)
14009  {
14010  pgbuf_ordered_unfix (thread_p, &pg_watcher);
14011  return;
14012  }
14013 
14014  VFID_COPY (&ovf_vfid, &heap_hdr->ovf_vfid);
14015  pgbuf_ordered_unfix (thread_p, &pg_watcher);
14016 
14017  /* now scan every page and dump it */
14018  vpid.volid = hfid->vfid.volid;
14019  vpid.pageid = hfid->hpgid;
14020  while (!VPID_ISNULL (&vpid))
14021  {
14022  pg_watcher.pgptr =
14024  if (old_pg_watcher.pgptr != NULL)
14025  {
14026  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
14027  }
14028  if (pg_watcher.pgptr == NULL)
14029  {
14030  /* something went wrong, return */
14031  return;
14032  }
14033  spage_dump (thread_p, fp, pg_watcher.pgptr, dump_records);
14034  (void) heap_vpid_next (thread_p, hfid, pg_watcher.pgptr, &vpid);
14035  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
14036  }
14037 
14038  if (old_pg_watcher.pgptr != NULL)
14039  {
14040  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
14041  }
14042  assert (pg_watcher.pgptr == NULL);
14043 
14044  /* Dump file table configuration */
14045  if (file_dump (thread_p, &hfid->vfid, fp) != NO_ERROR)
14046  {
14047  ASSERT_ERROR ();
14048  return;
14049  }
14050 
14051  if (!VFID_ISNULL (&ovf_vfid))
14052  {
14053  /* There is an overflow file for this heap file */
14054  fprintf (fp, "\nOVERFLOW FILE INFORMATION FOR HEAP FILE\n\n");
14055  if (file_dump (thread_p, &ovf_vfid, fp) != NO_ERROR)
14056  {
14057  ASSERT_ERROR ();
14058  return;
14059  }
14060  }
14061 
14062  /*
14063  * Dump schema definition
14064  */
14065 
14066  if (file_descriptor_get (thread_p, &hfid->vfid, &fdes) != NO_ERROR)
14067  {
14068  ASSERT_ERROR ();
14069  return;
14070  }
14071 
14072  if (!OID_ISNULL (&fdes.heap.class_oid))
14073  {
14074  if (heap_attrinfo_start (thread_p, &fdes.heap.class_oid, -1, NULL, &attr_info) != NO_ERROR)
14075  {
14076  return;
14077  }
14078 
14079  ret = heap_classrepr_dump (thread_p, fp, &fdes.heap.class_oid, attr_info.last_classrepr);
14080  if (ret != NO_ERROR)
14081  {
14082  heap_attrinfo_end (thread_p, &attr_info);
14083  return;
14084  }
14085 
14086  /* Dump individual Objects */
14087  if (dump_records == true)
14088  {
14089  if (heap_scancache_start (thread_p, &scan_cache, hfid, NULL, true, false, NULL) != NO_ERROR)
14090  {
14091  /* something went wrong, return */
14092  heap_attrinfo_end (thread_p, &attr_info);
14093  return;
14094  }
14095 
14096  OID_SET_NULL (&oid);
14097  oid.volid = hfid->vfid.volid;
14098 
14099  while (heap_next (thread_p, hfid, NULL, &oid, &peek_recdes, &scan_cache, PEEK) == S_SUCCESS)
14100  {
14101  fprintf (fp, "Object-OID = %2d|%4d|%2d,\n Length on disk = %d,\n", oid.volid, oid.pageid, oid.slotid,
14102  peek_recdes.length);
14103 
14104  if (heap_attrinfo_read_dbvalues (thread_p, &oid, &peek_recdes, NULL, &attr_info) != NO_ERROR)
14105  {
14106  fprintf (fp, " Error ... continue\n");
14107  continue;
14108  }
14109  heap_attrinfo_dump (thread_p, fp, &attr_info, false);
14110  }
14111  heap_scancache_end (thread_p, &scan_cache);
14112  }
14113  heap_attrinfo_end (thread_p, &attr_info);
14114  }
14115  else
14116  {
14117  /* boot_Db_parm.hfid */
14118  }
14119 
14120  fprintf (fp, "\n\n*** END OF DUMP FOR HEAP FILE ***\n\n");
14121 }
14122 
14123 /*
14124  * heap_dump_capacity () - dump heap file capacity
14125  *
14126  * return : error code
14127  * thread_p (in) : thread entry
14128  * fp (in) : output file
14129  * hfid (in) : heap file identifier
14130  */
14131 int
14132 heap_dump_capacity (THREAD_ENTRY * thread_p, FILE * fp, const HFID * hfid)
14133 {
14134  INT64 num_recs = 0;
14135  INT64 num_recs_relocated = 0;
14136  INT64 num_recs_inovf = 0;
14137  INT64 num_pages = 0;
14138  int avg_freespace = 0;
14139  int avg_freespace_nolast = 0;
14140  int avg_reclength = 0;
14141  int avg_overhead = 0;
14142  HEAP_CACHE_ATTRINFO attr_info;
14143  FILE_DESCRIPTORS fdes;
14144 
14145  int error_code = NO_ERROR;
14146 
14147  fprintf (fp, "IO_PAGESIZE = %d, DB_PAGESIZE = %d, Recv_overhead = %d\n", IO_PAGESIZE, DB_PAGESIZE,
14149 
14150  /* Go to each file, check only the heap files */
14151  error_code =
14152  heap_get_capacity (thread_p, hfid, &num_recs, &num_recs_relocated, &num_recs_inovf, &num_pages, &avg_freespace,
14153  &avg_freespace_nolast, &avg_reclength, &avg_overhead);
14154  if (error_code != NO_ERROR)
14155  {
14156  ASSERT_ERROR ();
14157  return error_code;
14158  }
14159  fprintf (fp, "HFID:%d|%d|%d, Num_recs = %" PRId64 ", Num_reloc_recs = %" PRId64 ",\n Num_recs_inovf = %" PRId64
14160  ", Avg_reclength = %d,\n Num_pages = %" PRId64 ", Avg_free_space_per_page = %d,\n"
14161  " Avg_free_space_per_page_without_lastpage = %d\n Avg_overhead_per_page = %d\n",
14162  (int) hfid->vfid.volid, hfid->vfid.fileid, hfid->hpgid, num_recs, num_recs_relocated, num_recs_inovf,
14163  avg_reclength, num_pages, avg_freespace, avg_freespace_nolast, avg_overhead);
14164 
14165  /* Dump schema definition */
14166  error_code = file_descriptor_get (thread_p, &hfid->vfid, &fdes);
14167  if (error_code != NO_ERROR)
14168  {
14169  ASSERT_ERROR ();
14170  return error_code;
14171  }
14172 
14173  if (!OID_ISNULL (&fdes.heap.class_oid))
14174  {
14175  error_code = heap_attrinfo_start (thread_p, &fdes.heap.class_oid, -1, NULL, &attr_info);
14176  if (error_code != NO_ERROR)
14177  {
14178  ASSERT_ERROR ();
14179  return error_code;
14180  }
14181  (void) heap_classrepr_dump (thread_p, fp, &fdes.heap.class_oid, attr_info.last_classrepr);
14182  heap_attrinfo_end (thread_p, &attr_info);
14183  }
14184  else
14185  {
14186  /* boot_Db_parm.hfid */
14187  }
14188 
14189  fprintf (fp, "\n");
14190  return NO_ERROR;
14191 }
14192 
14193 /*
14194  * Check consistency of heap from the point of view of relocation
14195  */
14196 
14197 /*
14198  * heap_chkreloc_start () - Start validating consistency of relocated objects in
14199  * heap
14200  * return: DISK_VALID, DISK_INVALID, DISK_ERROR
14201  * chk(in): Structure for checking relocation objects
14202  *
14203  */
14204 static DISK_ISVALID
14205 heap_chkreloc_start (HEAP_CHKALL_RELOCOIDS * chk)
14206 {
14207  chk->ht = mht_create ("Validate Relocation entries hash table", HEAP_CHK_ADD_UNFOUND_RELOCOIDS, oid_hash,
14209  if (chk->ht == NULL)
14210  {
14211  chk->ht = NULL;
14212  chk->unfound_reloc_oids = NULL;
14213  chk->max_unfound_reloc = -1;
14214  chk->num_unfound_reloc = -1;
14215  return DISK_ERROR;
14216  }
14217 
14219  if (chk->unfound_reloc_oids == NULL)
14220  {
14223 
14224  if (chk->ht != NULL)
14225  {
14226  mht_destroy (chk->ht);
14227  }
14228 
14229  chk->ht = NULL;
14230  chk->unfound_reloc_oids = NULL;
14231  chk->max_unfound_reloc = -1;
14232  chk->num_unfound_reloc = -1;
14233  return DISK_ERROR;
14234  }
14235 
14237  chk->num_unfound_reloc = 0;
14238  chk->verify = true;
14239  chk->verify_not_vacuumed = false;
14241 
14242  return DISK_VALID;
14243 }
14244 
14245 /*
14246  * heap_chkreloc_end () - Finish validating consistency of relocated objects
14247  * in heap
14248  * return: DISK_VALID, DISK_INVALID, DISK_ERROR
14249  * chk(in): Structure for checking relocation objects
14250  *
14251  * Note: Scanning the unfound_reloc_oid list, remove those entries that
14252  * are also found in hash table (remove them from unfound_reloc
14253  * list and from hash table). At the end of the scan, if there
14254  * are any entries in either hash table or unfound_reloc_oid, the
14255  * heap is incosistent/corrupted.
14256  */
static DISK_ISVALID
heap_chkreloc_end (HEAP_CHKALL_RELOCOIDS * chk)
{
  HEAP_CHK_RELOCOID *forward;
  DISK_ISVALID valid_reloc = DISK_VALID;
  int i;

  /* Propagate the "not vacuumed" verdict accumulated while the pages were
   * scanned; anything other than DISK_VALID dominates the final result. */
  if (chk->not_vacuumed_res != DISK_VALID)
    {
      valid_reloc = chk->not_vacuumed_res;
    }

  /*
   * Check for any postponed unfound relocated OIDs that have not been
   * checked or found. If they are not in the hash table, it would be an
   * error. That is, we would have a relocated (content) object without an
   * object pointing to it. (relocation/home).
   */
  if (chk->verify == true)
    {
      for (i = 0; i < chk->num_unfound_reloc; i++)
	{
	  forward = (HEAP_CHK_RELOCOID *) mht_get (chk->ht, &chk->unfound_reloc_oids[i]);
	  if (forward != NULL)
	    {
	      /*
	       * The entry was found.
	       * Remove the entry and the memory space
	       */
	      /* mht_rem() has been updated to take a function and an arg pointer that can be called on the entry
	       * before it is removed. We may want to take advantage of that here to free the memory associated with
	       * the entry */
	      if (mht_rem (chk->ht, &chk->unfound_reloc_oids[i], NULL, NULL) != NO_ERROR)
		{
		  /* Removal failed: forward is still owned by the table, so it
		   * cannot be freed here. */
		  valid_reloc = DISK_ERROR;
		}
	      else
		{
		  free_and_init (forward);
		}
	    }
	  else
	    {
	      /* No home/relocation record was ever seen for this relocated
	       * (newhome) OID: the heap is inconsistent. */
	      er_log_debug (ARG_FILE_LINE, "Unable to find relocation/home object for relocated_oid=%d|%d|%d\n",
			    (int) chk->unfound_reloc_oids[i].volid, chk->unfound_reloc_oids[i].pageid,
			    (int) chk->unfound_reloc_oids[i].slotid);
#if defined (SA_MODE)
	      valid_reloc = DISK_INVALID;
#endif /* SA_MODE */
	    }
	}
    }

  /*
   * If there are entries in the hash table, it would be problems. That is,
   * the relocated (content) objects were not found. That is, the home object
   * points to a dangling content object, or what it points is not a
   * relocated (newhome) object.
   */

  if (mht_count (chk->ht) > 0)
    {
      /* heap_chkreloc_print_notfound logs and frees each leftover entry. */
      (void) mht_map (chk->ht, heap_chkreloc_print_notfound, chk);
#if defined (SA_MODE)
      valid_reloc = DISK_INVALID;
#endif /* !SA_MODE */
    }

  mht_destroy (chk->ht);

  return valid_reloc;
}
14332 
14333 /*
14334  * heap_chkreloc_print_notfound () - Print entry that does not have a relocated entry
14335  * return: NO_ERROR
14336  * ignore_reloc_oid(in): Key (relocated entry to real entry) of hash table
14337  * ent(in): The entry associated with key (real oid)
14338  * xchk(in): Structure for checking relocation objects
14339  *
14340  * Note: Print unfound relocated record information for this home
14341  * record with relocation address HEAP is inconsistent.
14342  */
14343 static int
14344 heap_chkreloc_print_notfound (const void *ignore_reloc_oid, void *ent, void *xchk)
14345 {
14346  HEAP_CHK_RELOCOID *forward = (HEAP_CHK_RELOCOID *) ent;
14348 
14349  if (chk->verify == true)
14350  {
14352  "Unable to find relocated record with oid=%d|%d|%d for home object with oid=%d|%d|%d\n",
14353  (int) forward->reloc_oid.volid, forward->reloc_oid.pageid, (int) forward->reloc_oid.slotid,
14354  (int) forward->real_oid.volid, forward->real_oid.pageid, (int) forward->real_oid.slotid);
14355 #if defined (SA_MODE)
14357 #endif /* SA_MODE */
14358  }
14359  /* mht_rem() has been updated to take a function and an arg pointer that can be called on the entry before it is
14360  * removed. We may want to take advantage of that here to free the memory associated with the entry */
14361  (void) mht_rem (chk->ht, &forward->reloc_oid, NULL, NULL);
14362  free_and_init (forward);
14363 
14364  return NO_ERROR;
14365 }
14366 
14367 /*
14368  * heap_chkreloc_next () - Verify consistency of relocation records on page heap
14369  * return: DISK_VALID, DISK_INVALID, DISK_ERROR
14370  * thread_p(in) : thread context
14371  * chk(in): Structure for checking relocation objects
14372  * pgptr(in): Page pointer
14373  *
14374  * Note: While scanning objects of given page:
14375  * 1: if a relocation record is found, we check if that record
14376  * has already been seen (i.e., if it is in unfound_relc
14377  * list),
14378  * if it has been seen, we remove the entry from the
14379  * unfound_relc_oid list.
14380  * if it has not been seen, we add an entry to hash table
14381  * from reloc_oid to real_oid
14382  * Note: for optimization reasons, we may not scan the
14383  * unfound_reloc if it is too long, in this case the entry is
14384  * added to hash table.
14385  * 2: if a newhome (relocated) record is found, we check if the
14386  * real record has already been seen (i.e., check hash table),
14387  * if it has been seen, we remove the entry from hash table
14388  * otherwise, we add an entry into the unfound_reloc list
14389  */
14390 
14391 #define HEAP_CHKRELOC_UNFOUND_SHORT 5
14392 
14393 static DISK_ISVALID
14394 heap_chkreloc_next (THREAD_ENTRY * thread_p, HEAP_CHKALL_RELOCOIDS * chk, PAGE_PTR pgptr)
14395 {
14396  HEAP_CHK_RELOCOID *forward;
14397  INT16 type = REC_UNKNOWN;
14398  RECDES recdes;
14399  OID oid, class_oid;
14400  OID *peek_oid;
14401  void *ptr;
14402  bool found;
14403  int i;
14404 
14405  if (chk->verify != true)
14406  {
14407  return DISK_VALID;
14408  }
14409 
14410  if (chk->verify_not_vacuumed && heap_get_class_oid_from_page (thread_p, pgptr, &class_oid) != NO_ERROR)
14411  {
14413  return DISK_ERROR;
14414  }
14415 
14416  oid.volid = pgbuf_get_volume_id (pgptr);
14417  oid.pageid = pgbuf_get_page_id (pgptr);
14418  oid.slotid = 0; /* i.e., will get slot 1 */
14419 
14420  while (spage_next_record (pgptr, &oid.slotid, &recdes, PEEK) == S_SUCCESS)
14421  {
14423  {
14424  continue;
14425  }
14426  type = spage_get_record_type (pgptr, oid.slotid);
14427 
14428  switch (type)
14429  {
14430  case REC_RELOCATION:
14431  /*
14432  * The record stored on the page is a relocation record,
14433  * get the new home for the record
14434  *
14435  * If we have already entries waiting to be check and the list is
14436  * not that big, check them. Otherwise, wait until the end for the
14437  * check since searching the list may be expensive
14438  */
14439  peek_oid = (OID *) recdes.data;
14440  found = false;
14442  {
14443  /*
14444  * Go a head and check since the list is very short.
14445  */
14446  for (i = 0; i < chk->num_unfound_reloc; i++)
14447  {
14448  if (OID_EQ (&chk->unfound_reloc_oids[i], peek_oid))
14449  {
14450  /*
14451  * Remove it from the unfound list
14452  */
14453  if ((i + 1) != chk->num_unfound_reloc)
14454  {
14456  }
14457  chk->num_unfound_reloc--;
14458  found = true;
14459  break;
14460  }
14461  }
14462  }
14463  if (found == false)
14464  {
14465  /*
14466  * Add it to hash table
14467  */
14468  forward = (HEAP_CHK_RELOCOID *) malloc (sizeof (HEAP_CHK_RELOCOID));
14469  if (forward == NULL)
14470  {
14471  /*
14472  * Out of memory
14473  */
14475 
14476  return DISK_ERROR;
14477  }
14478  forward->real_oid = oid;
14479  forward->reloc_oid = *peek_oid;
14480  if (mht_put (chk->ht, &forward->reloc_oid, forward) == NULL)
14481  {
14482  /*
14483  * Failure in mht_put
14484  */
14485  return DISK_ERROR;
14486  }
14487  }
14488  break;
14489 
14490  case REC_BIGONE:
14491  if (chk->verify_not_vacuumed)
14492  {
14493  MVCC_REC_HEADER rec_header;
14494  PAGE_PTR overflow_page;
14495  DISK_ISVALID tmp_valid;
14496  VPID overflow_vpid;
14497  OID *overflow_oid;
14498 
14499  /* get overflow page id */
14500  overflow_oid = (OID *) recdes.data;
14501  overflow_vpid.volid = overflow_oid->volid;
14502  overflow_vpid.pageid = overflow_oid->pageid;
14503  if (VPID_ISNULL (&overflow_vpid))
14504  {
14506  return DISK_ERROR;
14507  }
14508 
14509  /* fix page and get record */
14510  overflow_page =
14511  pgbuf_fix (thread_p, &overflow_vpid, OLD_PAGE, PGBUF_LATCH_READ, PGBUF_UNCONDITIONAL_LATCH);
14512  if (overflow_page == NULL)
14513  {
14515  return DISK_ERROR;
14516  }
14517  if (heap_get_mvcc_rec_header_from_overflow (overflow_page, &rec_header, &recdes) != NO_ERROR)
14518  {
14519  pgbuf_unfix_and_init (thread_p, overflow_page);
14521  return DISK_ERROR;
14522  }
14523  pgbuf_unfix_and_init (thread_p, overflow_page);
14524 
14525  /* check header */
14526  tmp_valid = vacuum_check_not_vacuumed_rec_header (thread_p, &oid, &class_oid, &rec_header, -1);
14527  switch (tmp_valid)
14528  {
14529  case DISK_VALID:
14530  break;
14531  case DISK_INVALID:
14533  break;
14534  case DISK_ERROR:
14535  default:
14537  return DISK_ERROR;
14538  break;
14539  }
14540  }
14541  break;
14542 
14543  case REC_HOME:
14544  if (chk->verify_not_vacuumed)
14545  {
14546  DISK_ISVALID tmp_valid = vacuum_check_not_vacuumed_recdes (thread_p, &oid, &class_oid,
14547  &recdes, -1);
14548  switch (tmp_valid)
14549  {
14550  case DISK_VALID:
14551  break;
14552  case DISK_INVALID:
14554  break;
14555  case DISK_ERROR:
14556  default:
14558  return DISK_ERROR;
14559  break;
14560  }
14561  }
14562  break;
14563 
14564  case REC_NEWHOME:
14565  if (chk->verify_not_vacuumed)
14566  {
14567  DISK_ISVALID tmp_valid = vacuum_check_not_vacuumed_recdes (thread_p, &oid, &class_oid,
14568  &recdes, -1);
14569  switch (tmp_valid)
14570  {
14571  case DISK_VALID:
14572  break;
14573  case DISK_INVALID:
14575  break;
14576  case DISK_ERROR:
14577  default:
14579  return DISK_ERROR;
14580  break;
14581  }
14582  }
14583 
14584  /*
14585  * Remove the object from hash table or insert the object in unfound
14586  * reloc check list.
14587  */
14588  forward = (HEAP_CHK_RELOCOID *) mht_get (chk->ht, &oid);
14589  if (forward != NULL)
14590  {
14591  /*
14592  * The entry was found.
14593  * Remove the entry and the memory space
14594  */
14595  /* mht_rem() has been updated to take a function and an arg pointer that can be called on the entry
14596  * before it is removed. We may want to take advantage of that here to free the memory associated with
14597  * the entry */
14598  (void) mht_rem (chk->ht, &forward->reloc_oid, NULL, NULL);
14599  free_and_init (forward);
14600  }
14601  else
14602  {
14603  /*
14604  * The entry is not in hash table.
14605  * Add entry into unfound_reloc list
14606  */
14607  if (chk->max_unfound_reloc <= chk->num_unfound_reloc)
14608  {
14609  /*
14610  * Need to realloc the area. Add 100 OIDs to it
14611  */
14613 
14614  ptr = realloc (chk->unfound_reloc_oids, i);
14615  if (ptr == NULL)
14616  {
14618  return DISK_ERROR;
14619  }
14620  else
14621  {
14622  chk->unfound_reloc_oids = (OID *) ptr;
14624  }
14625  }
14626  i = chk->num_unfound_reloc++;
14627  chk->unfound_reloc_oids[i] = oid;
14628  }
14629  break;
14630 
14631  case REC_MARKDELETED:
14633  default:
14634  break;
14635  }
14636  }
14637 
14638  return DISK_VALID;
14639 }
14640 
14641 /*
14642  * Chn guesses for class objects at clients
14643  */
14644 
14645 /*
14646  * Note: Currently, we do not try to guess chn of instances at clients.
14647  * We are just doing it for classes.
14648  *
14649  * We do not know if the object is cached on the client side at all, we
14650  * are just guessing that it is still cached if it was sent to it. This is
14651  * almost 100% true since classes are avoided during garbage collection.
14652 
 * Caller does not know the chn when the client is fetching instances of the
 * class without knowing the class_oid. That does not imply that the
 * class object is not cached on the workspace. The client just did not
 * know the class_oid of the given fetched object. The server finds it and
 * has to decide whether or not to send the class object. If the server does
 * not send the class object, and the client does not have it; the client will
 * request the class object (another server call)
14660  */
14661 
/*
 * heap_chnguess_initialize () - Initialize structure of chn guesses at clients
 * return: NO_ERROR
 *
 * Note: Initialize structures used to cache information of CHN guess
 * at client workspaces.
 * Note: We currently maintain that information only for classes.
 */
14670 static int
14671 heap_chnguess_initialize (void)
14672 {
14673  HEAP_CHNGUESS_ENTRY *entry;
14674  int i;
14675  int ret = NO_ERROR;
14676 
14677  if (heap_Guesschn != NULL)
14678  {
14679  ret = heap_chnguess_finalize ();
14680  if (ret != NO_ERROR)
14681  {
14682  goto exit_on_error;
14683  }
14684  }
14685 
14686  heap_Guesschn_area.schema_change = false;
14687  heap_Guesschn_area.clock_hand = -1;
14688  heap_Guesschn_area.num_entries = HEAP_CLASSREPR_MAXCACHE;
14689 
14690  /*
14691  * Start with at least the fude factor of clients. Make sure that every
14692  * bit is used.
14693  */
14694  heap_Guesschn_area.num_clients = logtb_get_number_of_total_tran_indices ();
14695  if (heap_Guesschn_area.num_clients < HEAP_CHNGUESS_FUDGE_MININDICES)
14696  {
14697  heap_Guesschn_area.num_clients = HEAP_CHNGUESS_FUDGE_MININDICES;
14698  }
14699 
14700  /* Make sure every single bit is used */
14701  heap_Guesschn_area.nbytes = HEAP_NBITS_TO_NBYTES (heap_Guesschn_area.num_clients);
14702  heap_Guesschn_area.num_clients = HEAP_NBYTES_TO_NBITS (heap_Guesschn_area.nbytes);
14703 
14704  /* Build the hash table from OID to CHN */
14705  heap_Guesschn_area.ht =
14706  mht_create ("Memory hash OID to chn at clients", HEAP_CLASSREPR_MAXCACHE, oid_hash, oid_compare_equals);
14707  if (heap_Guesschn_area.ht == NULL)
14708  {
14709  goto exit_on_error;
14710  }
14711 
14712  heap_Guesschn_area.entries =
14713  (HEAP_CHNGUESS_ENTRY *) malloc (sizeof (HEAP_CHNGUESS_ENTRY) * heap_Guesschn_area.num_entries);
14714  if (heap_Guesschn_area.entries == NULL)
14715  {
14717  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ret, 1, sizeof (HEAP_CHNGUESS_ENTRY) * heap_Guesschn_area.num_entries);
14718  mht_destroy (heap_Guesschn_area.ht);
14719  goto exit_on_error;
14720  }
14721 
14722  heap_Guesschn_area.bitindex = (unsigned char *) malloc (heap_Guesschn_area.nbytes * heap_Guesschn_area.num_entries);
14723  if (heap_Guesschn_area.bitindex == NULL)
14724  {
14727  (size_t) (heap_Guesschn_area.nbytes * heap_Guesschn_area.num_entries));
14728  mht_destroy (heap_Guesschn_area.ht);
14729  free_and_init (heap_Guesschn_area.entries);
14730  goto exit_on_error;
14731  }
14732 
14733  /*
14734  * Initialize every entry as not recently freed
14735  */
14736  for (i = 0; i < heap_Guesschn_area.num_entries; i++)
14737  {
14738  entry = &heap_Guesschn_area.entries[i];
14739  entry->idx = i;
14740  entry->chn = NULL_CHN;
14741  entry->recently_accessed = false;
14742  OID_SET_NULL (&entry->oid);
14743  entry->bits = &heap_Guesschn_area.bitindex[i * heap_Guesschn_area.nbytes];
14744  HEAP_NBYTES_CLEARED (entry->bits, heap_Guesschn_area.nbytes);
14745  }
14746  heap_Guesschn = &heap_Guesschn_area;
14747 
14748  return ret;
14749 
14750 exit_on_error:
14751 
14752  return (ret == NO_ERROR) ? ER_FAILED : ret;
14753 }
14754 
/*
 * heap_chnguess_realloc () - More clients than currently maintained
 * return: NO_ERROR
 *
 * Note: Expand the chn_guess structures to support at least the number
 * of currently connected clients.
 */
14762 static int
14763 heap_chnguess_realloc (void)
14764 {
14765  int i;
14766  unsigned char *save_bitindex;
14767  int save_nbytes;
14768  HEAP_CHNGUESS_ENTRY *entry;
14769  int ret = NO_ERROR;
14770 
14771  if (heap_Guesschn == NULL)
14772  {
14773  return heap_chnguess_initialize ();
14774  }
14775 
14776  /*
14777  * Save current information, so we can copy them at a alater point
14778  */
14779  save_bitindex = heap_Guesschn_area.bitindex;
14780  save_nbytes = heap_Guesschn_area.nbytes;
14781 
14782  /*
14783  * Find the number of clients that need to be supported. Avoid small
14784  * increases since it is undesirable to realloc again. Increase by at least
14785  * the fudge factor.
14786  */
14787 
14788  heap_Guesschn->num_clients += HEAP_CHNGUESS_FUDGE_MININDICES;
14790 
14791  if (heap_Guesschn->num_clients < i)
14792  {
14793  heap_Guesschn->num_clients = i + HEAP_CHNGUESS_FUDGE_MININDICES;
14794  }
14795 
14796  /* Make sure every single bit is used */
14797  heap_Guesschn_area.nbytes = HEAP_NBITS_TO_NBYTES (heap_Guesschn_area.num_clients);
14798  heap_Guesschn_area.num_clients = HEAP_NBYTES_TO_NBITS (heap_Guesschn_area.nbytes);
14799 
14800  heap_Guesschn_area.bitindex = (unsigned char *) malloc (heap_Guesschn_area.nbytes * heap_Guesschn_area.num_entries);
14801  if (heap_Guesschn_area.bitindex == NULL)
14802  {
14805  (size_t) (heap_Guesschn_area.nbytes * heap_Guesschn_area.num_entries));
14806  heap_Guesschn_area.bitindex = save_bitindex;
14807  heap_Guesschn_area.nbytes = save_nbytes;
14808  heap_Guesschn_area.num_clients = HEAP_NBYTES_TO_NBITS (save_nbytes);
14809  goto exit_on_error;
14810  }
14811 
14812  /*
14813  * Now reset the bits for each entry
14814  */
14815 
14816  for (i = 0; i < heap_Guesschn_area.num_entries; i++)
14817  {
14818  entry = &heap_Guesschn_area.entries[i];
14819  entry->bits = &heap_Guesschn_area.bitindex[i * heap_Guesschn_area.nbytes];
14820  /*
14821  * Copy the bits
14822  */
14823  memcpy (entry->bits, &save_bitindex[i * save_nbytes], save_nbytes);
14824  HEAP_NBYTES_CLEARED (&entry->bits[save_nbytes], heap_Guesschn_area.nbytes - save_nbytes);
14825  }
14826  /*
14827  * Now throw previous storage
14828  */
14829  free_and_init (save_bitindex);
14830 
14831  return ret;
14832 
14833 exit_on_error:
14834 
14835  return (ret == NO_ERROR && (ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret;
14836 }
14837 
14838 /*
14839  * heap_chnguess_finalize () - Finish chnguess information
14840  * return: NO_ERROR
14841  *
14842  * Note: Destroy hash table and memory for entries.
14843  */
14844 static int
14845 heap_chnguess_finalize (void)
14846 {
14847  int ret = NO_ERROR;
14848 
14849  if (heap_Guesschn == NULL)
14850  {
14851  return NO_ERROR; /* nop */
14852  }
14853 
14854  mht_destroy (heap_Guesschn->ht);
14855  free_and_init (heap_Guesschn->entries);
14856  free_and_init (heap_Guesschn->bitindex);
14857  heap_Guesschn->ht = NULL;
14858  heap_Guesschn->schema_change = false;
14859  heap_Guesschn->clock_hand = 0;
14860  heap_Guesschn->num_entries = 0;
14861  heap_Guesschn->num_clients = 0;
14862  heap_Guesschn->nbytes = 0;
14863 
14864  heap_Guesschn = NULL;
14865 
14866  return ret;
14867 }
14868 
14869 /*
14870  * heap_stats_bestspace_initialize () - Initialize structure of best space
14871  * return: NO_ERROR
14872  */
14873 static int
14874 heap_stats_bestspace_initialize (void)
14875 {
14876  int ret = NO_ERROR;
14877 
14878  if (heap_Bestspace != NULL)
14879  {
14880  ret = heap_stats_bestspace_finalize ();
14881  if (ret != NO_ERROR)
14882  {
14883  goto exit_on_error;
14884  }
14885  }
14886 
14887  heap_Bestspace = &heap_Bestspace_cache_area;
14888 
14889  pthread_mutex_init (&heap_Bestspace->bestspace_mutex, NULL);
14890 
14891  heap_Bestspace->num_stats_entries = 0;
14892 
14893  heap_Bestspace->hfid_ht =
14894  mht_create ("Memory hash HFID to {bestspace}", HEAP_STATS_ENTRY_MHT_EST_SIZE, heap_hash_hfid, heap_compare_hfid);
14895  if (heap_Bestspace->hfid_ht == NULL)
14896  {
14897  goto exit_on_error;
14898  }
14899 
14900  heap_Bestspace->vpid_ht =
14901  mht_create ("Memory hash VPID to {bestspace}", HEAP_STATS_ENTRY_MHT_EST_SIZE, heap_hash_vpid, heap_compare_vpid);
14902  if (heap_Bestspace->vpid_ht == NULL)
14903  {
14904  goto exit_on_error;
14905  }
14906 
14907  heap_Bestspace->num_alloc = 0;
14908  heap_Bestspace->num_free = 0;
14909  heap_Bestspace->free_list_count = 0;
14910  heap_Bestspace->free_list = NULL;
14911 
14912  return ret;
14913 
14914 exit_on_error:
14915 
14916  return (ret == NO_ERROR) ? ER_FAILED : ret;
14917 }
14918 
14919 /*
14920  * heap_stats_bestspace_finalize () - Finish best space information
14921  * return: NO_ERROR
14922  *
14923  * Note: Destroy hash table and memory for entries.
14924  */
14925 static int
14926 heap_stats_bestspace_finalize (void)
14927 {
14928  HEAP_STATS_ENTRY *ent;
14929  int ret = NO_ERROR;
14930 
14931  if (heap_Bestspace == NULL)
14932  {
14933  return NO_ERROR;
14934  }
14935 
14936  if (heap_Bestspace->vpid_ht != NULL)
14937  {
14938  (void) mht_map_no_key (NULL, heap_Bestspace->vpid_ht, heap_stats_entry_free, NULL);
14939  while (heap_Bestspace->free_list_count > 0)
14940  {
14941  ent = heap_Bestspace->free_list;
14942  assert_release (ent != NULL);
14943 
14944  heap_Bestspace->free_list = ent->next;
14945  ent->next = NULL;
14946 
14947  free (ent);
14948 
14949  heap_Bestspace->free_list_count--;
14950  }
14951  assert_release (heap_Bestspace->free_list == NULL);
14952  }
14953 
14954  if (heap_Bestspace->vpid_ht != NULL)
14955  {
14956  mht_destroy (heap_Bestspace->vpid_ht);
14957  heap_Bestspace->vpid_ht = NULL;
14958  }
14959 
14960  if (heap_Bestspace->hfid_ht != NULL)
14961  {
14962  mht_destroy (heap_Bestspace->hfid_ht);
14963  heap_Bestspace->hfid_ht = NULL;
14964  }
14965 
14966  pthread_mutex_destroy (&heap_Bestspace->bestspace_mutex);
14967 
14968  heap_Bestspace = NULL;
14969 
14970  return ret;
14971 }
14972 
14973 /*
14974  * heap_chnguess_decache () - Decache a specific entry or all entries
14975  * return: NO_ERROR
14976  * oid(in): oid: class oid or NULL
14977  * IF NULL implies all classes
14978  *
14979  * Note: Remove from the hash the entry associated with given oid. If
14980  * oid is NULL, all entries in hash are removed.
14981  * This function is called when a class is updated or during
14982  * rollback when a class was changed
14983  */
14984 static int
14985 heap_chnguess_decache (const OID * oid)
14986 {
14987  HEAP_CHNGUESS_ENTRY *entry;
14988  int ret = NO_ERROR;
14989 
14990  if (heap_Guesschn == NULL)
14991  {
14992  return NO_ERROR; /* nop */
14993  }
14994 
14995  if (oid == NULL)
14996  {
14997  (void) mht_map (heap_Guesschn->ht, heap_chnguess_remove_entry, NULL);
14998  }
14999  else
15000  {
15001  entry = (HEAP_CHNGUESS_ENTRY *) mht_get (heap_Guesschn->ht, oid);
15002  if (entry != NULL)
15003  {
15004  (void) heap_chnguess_remove_entry (oid, entry, NULL);
15005  }
15006  }
15007 
15008  if (heap_Guesschn->schema_change == true && oid == NULL)
15009  {
15010  heap_Guesschn->schema_change = false;
15011  }
15012 
15013  return ret;
15014 }
15015 
15016 /*
15017  * heap_chnguess_remove_entry () - Remove an entry from chnguess hash table
15018  * return: NO_ERROR
15019  * oid_key(in): Key (oid) of chnguess table
15020  * ent(in): The entry of hash table
15021  * xignore(in): Extra arguments (currently ignored)
15022  *
15023  * Note: Remove from the hash the given entry. The entry is marked as
15024  * for immediate reuse.
15025  */
15026 static int
15027 heap_chnguess_remove_entry (const void *oid_key, void *ent, void *xignore)
15028 {
15029  HEAP_CHNGUESS_ENTRY *entry = (HEAP_CHNGUESS_ENTRY *) ent;
15030 
15031  /* mht_rem() has been updated to take a function and an arg pointer that can be called on the entry before it is
15032  * removed. We may want to take advantage of that here to free the memory associated with the entry */
15033  (void) mht_rem (heap_Guesschn->ht, oid_key, NULL, NULL);
15034  OID_SET_NULL (&entry->oid);
15035  entry->chn = NULL_CHN;
15036  entry->recently_accessed = false;
15037  heap_Guesschn_area.clock_hand = entry->idx;
15038 
15039  return NO_ERROR;
15040 }
15041 
15042 #if defined (CUBRID_DEBUG)
15043 /*
15044  * heap_chnguess_dump () - Dump current chnguess hash table
15045  * return:
15046  *
15047  * Note: Dump all valid chnguess entries.
15048  */
void
heap_chnguess_dump (FILE * fp)
{
  int max_tranindex, tran_index, i;
  HEAP_CHNGUESS_ENTRY *entry;

  /* Nothing to dump when the cache is not initialized. */
  if (heap_Guesschn != NULL)
    {
      fprintf (fp, "*** Dump of CLASS_OID to CHNGUESS at clients *** \n");
      fprintf (fp, "Schema_change = %d, clock_hand = %d,\n", heap_Guesschn->schema_change, heap_Guesschn->clock_hand);
      fprintf (fp, "Nentries = %d, Nactive_entries = %u, maxnum of clients = %d, nbytes = %d\n",
	       heap_Guesschn->num_entries, mht_count (heap_Guesschn->ht), heap_Guesschn->num_clients,
	       heap_Guesschn->nbytes);
      fprintf (fp, "Hash Table = %p, Entries = %p, Bitindex = %p\n", heap_Guesschn->ht, heap_Guesschn->entries,
	       heap_Guesschn->bitindex);

      max_tranindex = logtb_get_number_of_total_tran_indices ();
      for (i = 0; i < heap_Guesschn->num_entries; i++)
	{
	  entry = &heap_Guesschn_area.entries[i];

	  /* Only occupied slots (non-null OID) are dumped. */
	  if (!OID_ISNULL (&entry->oid))
	    {
	      fprintf (fp, " \nEntry_id %d", entry->idx);
	      fprintf (fp, "OID = %2d|%4d|%2d, chn = %d, recently_free = %d,", entry->oid.volid, entry->oid.pageid,
		       entry->oid.slotid, entry->chn, entry->recently_accessed);

	      /* Dump one bit at a time */
	      for (tran_index = 0; tran_index < max_tranindex; tran_index++)
		{
		  /* Visual grouping: newline every 40 bits, space every 10. */
		  if (tran_index % 40 == 0)
		    {
		      fprintf (fp, "\n ");
		    }
		  else if (tran_index % 10 == 0)
		    {
		      fprintf (fp, " ");
		    }
		  fprintf (fp, "%d", HEAP_BIT_GET (entry->bits, tran_index) ? 1 : 0);
		}
	      fprintf (fp, "\n");
	    }
	}
    }
}
15094 #endif /* CUBRID_DEBUG */
15095 
15096 /*
15097  * heap_chnguess_get () - Guess chn of given oid for given tran index (at client)
15098  * return:
15099  * oid(in): OID from where to guess chn at client workspace
15100  * tran_index(in): The client transaction index
15101  *
15102  * Note: Find/guess the chn of the given OID object at the workspace of
15103  * given client transaction index
15104  */
15105 int
15106 heap_chnguess_get (THREAD_ENTRY * thread_p, const OID * oid, int tran_index)
15107 {
15108  int chn = NULL_CHN;
15109  HEAP_CHNGUESS_ENTRY *entry;
15110 
15111  if (csect_enter (thread_p, CSECT_HEAP_CHNGUESS, INF_WAIT) != NO_ERROR)
15112  {
15113  return NULL_CHN;
15114  }
15115 
15116  if (heap_Guesschn != NULL)
15117  {
15118  if (heap_Guesschn->num_clients <= tran_index)
15119  {
15120  if (heap_chnguess_realloc () != NO_ERROR)
15121  {
15122  csect_exit (thread_p, CSECT_HEAP_CHNGUESS);
15123  return NULL_CHN;
15124  }
15125  }
15126 
15127  /*
15128  * Do we have this entry in hash table, if we do then check corresponding
15129  * bit for given client transaction index.
15130  */
15131 
15132  entry = (HEAP_CHNGUESS_ENTRY *) mht_get (heap_Guesschn->ht, oid);
15133  if (entry != NULL && HEAP_BIT_GET (entry->bits, tran_index))
15134  {
15135  chn = entry->chn;
15136  }
15137  }
15138 
15139  csect_exit (thread_p, CSECT_HEAP_CHNGUESS);
15140 
15141  return chn;
15142 }
15143 
15144 /*
15145  * heap_chnguess_put () - Oid object is in the process of been sent to client
15146  * return: chn or NULL_CHN if not cached
15147  * oid(in): object oid
15148  * tran_index(in): The client transaction index
15149  * chn(in): cache coherency number.
15150  *
15151  * Note: Cache the information that object oid with chn has been sent
15152  * to client with trans_index.
15153  * If the function fails, it returns NULL_CHN. This failure is
15154  * more like a warning since the chnguess is just a caching structure.
15155  */
15156 int
15157 heap_chnguess_put (THREAD_ENTRY * thread_p, const OID * oid, int tran_index, int chn)
15158 {
15159  int i;
15160  bool can_continue;
15161  HEAP_CHNGUESS_ENTRY *entry;
15162 
15163  if (heap_Guesschn == NULL)
15164  {
15165  return NULL_CHN;
15166  }
15167 
15168  if (csect_enter (thread_p, CSECT_HEAP_CHNGUESS, INF_WAIT) != NO_ERROR)
15169  {
15170  return NULL_CHN;
15171  }
15172 
15173  if (heap_Guesschn->num_clients <= tran_index)
15174  {
15175  if (heap_chnguess_realloc () != NO_ERROR)
15176  {
15177  csect_exit (thread_p, CSECT_HEAP_CHNGUESS);
15178  return NULL_CHN;
15179  }
15180  }
15181 
15182  /*
15183  * Is the entry already in the chnguess hash table ?
15184  */
15185  entry = (HEAP_CHNGUESS_ENTRY *) mht_get (heap_Guesschn->ht, oid);
15186  if (entry != NULL)
15187  {
15188  /*
15189  * If the cache coherence number is different reset all client entries
15190  */
15191  if (entry->chn != chn)
15192  {
15193  HEAP_NBYTES_CLEARED (entry->bits, heap_Guesschn_area.nbytes);
15194  entry->chn = chn;
15195  }
15196  }
15197  else
15198  {
15199  /*
15200  * Replace one of the entries that has not been used for a while.
15201  * Follow clock replacement algorithm.
15202  */
15203  can_continue = true;
15204  while (entry == NULL && can_continue == true)
15205  {
15206  can_continue = false;
15207  for (i = 0; i < heap_Guesschn->num_entries; i++)
15208  {
15209  /*
15210  * Increase the clock to next entry
15211  */
15212  heap_Guesschn->clock_hand++;
15213  if (heap_Guesschn->clock_hand >= heap_Guesschn->num_entries)
15214  {
15215  heap_Guesschn->clock_hand = 0;
15216  }
15217 
15218  entry = &heap_Guesschn->entries[heap_Guesschn->clock_hand];
15219  if (entry->recently_accessed == true)
15220  {
15221  /*
15222  * Set recently freed to false, so it can be replaced in next
15223  * if the entry is not referenced
15224  */
15225  entry->recently_accessed = false;
15226  entry = NULL;
15227  can_continue = true;
15228  }
15229  else
15230  {
15231  entry->oid = *oid;
15232  entry->chn = chn;
15233  HEAP_NBYTES_CLEARED (entry->bits, heap_Guesschn_area.nbytes);
15234  break;
15235  }
15236  }
15237  }
15238  }
15239 
15240  /*
15241  * Now set the desired client transaction index bit
15242  */
15243  if (entry != NULL)
15244  {
15245  HEAP_BIT_SET (entry->bits, tran_index);
15246  entry->recently_accessed = true;
15247  }
15248  else
15249  {
15250  chn = NULL_CHN;
15251  }
15252 
15253  csect_exit (thread_p, CSECT_HEAP_CHNGUESS);
15254 
15255  return chn;
15256 }
15257 
15258 /*
15259  * heap_chnguess_clear () - Clear any cached information for given client
15260  * used when client is shutdown
15261  * return:
15262  * tran_index(in): The client transaction index
15263  *
15264  * Note: Clear the transaction index bit for all chnguess entries.
15265  */
15266 void
15267 heap_chnguess_clear (THREAD_ENTRY * thread_p, int tran_index)
15268 {
15269  int i;
15270  HEAP_CHNGUESS_ENTRY *entry;
15271 
15272  if (csect_enter (thread_p, CSECT_HEAP_CHNGUESS, INF_WAIT) != NO_ERROR)
15273  {
15274  return;
15275  }
15276 
15277  if (heap_Guesschn != NULL)
15278  {
15279  for (i = 0; i < heap_Guesschn->num_entries; i++)
15280  {
15281  entry = &heap_Guesschn_area.entries[i];
15282  if (!OID_ISNULL (&entry->oid))
15283  {
15284  HEAP_BIT_CLEAR (entry->bits, (unsigned int) tran_index);
15285  }
15286  }
15287  }
15288 
15289  csect_exit (thread_p, CSECT_HEAP_CHNGUESS);
15290 
15291 }
15292 
15293 /*
15294  * Recovery functions
15295  */
15296 
15297 /*
15298  * heap_rv_redo_newpage () - Redo the statistics or a new page allocation for
15299  * a heap file
15300  * return: int
15301  * rcv(in): Recovery structure
15302  */
15303 int
15305 {
15306  RECDES recdes;
15307  INT16 slotid;
15308  int sp_success;
15309 
15310  (void) pgbuf_set_page_ptype (thread_p, rcv->pgptr, PAGE_HEAP);
15311 
15312  /* Initialize header page */
15313  spage_initialize (thread_p, rcv->pgptr, heap_get_spage_type (), HEAP_MAX_ALIGN, SAFEGUARD_RVSPACE);
15314 
15315  /* Now insert first record (either statistics or chain record) */
15316  recdes.area_size = recdes.length = rcv->length;
15317  recdes.type = REC_HOME;
15318  recdes.data = (char *) rcv->data;
15319  sp_success = spage_insert (thread_p, rcv->pgptr, &recdes, &slotid);
15320  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15321 
15322  if (sp_success != SP_SUCCESS || slotid != HEAP_HEADER_AND_CHAIN_SLOTID)
15323  {
15324  if (sp_success != SP_SUCCESS)
15325  {
15327  }
15328  /* something went wrong. Unable to redo initialization of new heap page */
15329  assert (er_errid () != NO_ERROR);
15330  return er_errid ();
15331  }
15332 
15333  return NO_ERROR;
15334 }
15335 
15336 /*
15337  * heap_rv_undoredo_pagehdr () - Recover the header of a heap page
15338  * (either statistics/chain)
15339  * return: int
15340  * rcv(in): Recovery structure
15341  *
15342  * Note: Recover the update of the header or a heap page. The header
15343  * can be the heap header or a chain header.
15344  */
15345 int
15347 {
15348  RECDES recdes;
15349  int sp_success;
15350 
15351  (void) pgbuf_check_page_ptype (thread_p, rcv->pgptr, PAGE_HEAP);
15352 
15353  recdes.area_size = recdes.length = rcv->length;
15354  recdes.type = REC_HOME;
15355  recdes.data = (char *) rcv->data;
15356 
15357  sp_success = spage_update (thread_p, rcv->pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes);
15358  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15359 
15360  if (sp_success != SP_SUCCESS)
15361  {
15362  /* something went wrong. Unable to redo update statistics for chain */
15363  if (sp_success != SP_ERROR)
15364  {
15366  }
15367  assert (er_errid () != NO_ERROR);
15368  return er_errid ();
15369  }
15370  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15371 
15372  return NO_ERROR;
15373 }
15374 
15375 /*
15376  * heap_rv_dump_statistics () - Dump statistics recovery information
15377  * return: int
15378  * ignore_length(in): Length of Recovery Data
15379  * data(in): The data being logged
15380  *
15381  * Note: Dump statistics recovery information
15382  */
15383 void
15384 heap_rv_dump_statistics (FILE * fp, int ignore_length, void *data)
15385 {
15386  int ret = NO_ERROR;
15387 
15388  HEAP_HDR_STATS *heap_hdr; /* Header of heap structure */
15389 
15390  heap_hdr = (HEAP_HDR_STATS *) data;
15391  ret = heap_dump_hdr (fp, heap_hdr);
15392 }
15393 
15394 /*
15395  * heap_rv_dump_chain () - Dump chain recovery information
15396  * return: int
15397  * ignore_length(in): Length of Recovery Data
15398  * data(in): The data being logged
15399  */
15400 void
15401 heap_rv_dump_chain (FILE * fp, int ignore_length, void *data)
15402 {
15403  HEAP_CHAIN *chain;
15404 
15405  chain = (HEAP_CHAIN *) data;
15406  fprintf (fp, "CLASS_OID = %2d|%4d|%2d, PREV_VPID = %2d|%4d, NEXT_VPID = %2d|%4d, MAX_MVCCID=%llu, flags=%d.\n",
15407  chain->class_oid.volid, chain->class_oid.pageid, chain->class_oid.slotid, chain->prev_vpid.volid,
15408  chain->prev_vpid.pageid, chain->next_vpid.volid, chain->next_vpid.pageid,
15409  (unsigned long long int) chain->max_mvccid, (int) chain->flags);
15410 }
15411 
15412 /*
15413  * heap_rv_redo_insert () - Redo the insertion of an object
15414  * return: int
15415  * rcv(in): Recovery structure
15416  *
15417  * Note: Redo the insertion of an object at a specific location (OID).
15418  */
15419 int
15421 {
15422  INT16 slotid;
15423  RECDES recdes;
15424  int sp_success;
15425 
15426  slotid = rcv->offset;
15427  recdes.type = *(INT16 *) (rcv->data);
15428  recdes.data = (char *) (rcv->data) + sizeof (recdes.type);
15429  recdes.area_size = recdes.length = rcv->length - sizeof (recdes.type);
15430 
15432  {
15433  /*
15434  * The data here isn't really the data to be inserted (because there
15435  * wasn't any); instead it's the number of bytes that were reserved
15436  * for future insertion. Change recdes.length to reflect the number
15437  * of bytes to reserve, but there's no need for a valid recdes.data:
15438  * spage_insert_for_recovery knows to ignore it in this case.
15439  */
15440  recdes.area_size = recdes.length = *(INT16 *) recdes.data;
15441  recdes.data = NULL;
15442  }
15443 
15444  sp_success = spage_insert_for_recovery (thread_p, rcv->pgptr, slotid, &recdes);
15445  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15446 
15447  if (sp_success != SP_SUCCESS)
15448  {
15449  /* Unable to redo insertion */
15450  if (sp_success != SP_ERROR)
15451  {
15453  }
15454  assert (er_errid () != NO_ERROR);
15455  return er_errid ();
15456  }
15457 
15458  return NO_ERROR;
15459 }
15460 
15461 /*
15462  * heap_mvcc_log_insert () - Log MVCC insert heap operation.
15463  *
15464  * return : Void.
15465  * thread_p (in) : Thread entry.
15466  * p_recdes (in) : Newly inserted record.
15467  * p_addr (in) : Log address data.
15468  */
15469 static void
15470 heap_mvcc_log_insert (THREAD_ENTRY * thread_p, RECDES * p_recdes, LOG_DATA_ADDR * p_addr)
15471 {
15472 #define HEAP_LOG_MVCC_INSERT_MAX_REDO_CRUMBS 4
15473 
15474  int n_redo_crumbs = 0, data_copy_offset = 0, chn_offset;
15476  INT32 mvcc_flags;
15477  HEAP_PAGE_VACUUM_STATUS vacuum_status;
15478 
15479  assert (p_recdes != NULL);
15480  assert (p_addr != NULL);
15481 
15482  vacuum_status = heap_page_get_vacuum_status (thread_p, p_addr->pgptr);
15483 
15484  /* Update chain. */
15485  heap_page_update_chain_after_mvcc_op (thread_p, p_addr->pgptr, logtb_get_current_mvccid (thread_p));
15486  if (vacuum_status != heap_page_get_vacuum_status (thread_p, p_addr->pgptr))
15487  {
15488  /* Mark status change for recovery. */
15490  }
15491 
15492  /* Build redo crumbs */
15493  /* Add record type */
15494  redo_crumbs[n_redo_crumbs].length = sizeof (p_recdes->type);
15495  redo_crumbs[n_redo_crumbs++].data = &p_recdes->type;
15496 
15497  if (p_recdes->type != REC_BIGONE)
15498  {
15499  mvcc_flags = (INT32) OR_GET_MVCC_FLAG (p_recdes->data);
15500  chn_offset = OR_CHN_OFFSET;
15501 
15502  /* Add representation ID and flags field */
15503  redo_crumbs[n_redo_crumbs].length = OR_INT_SIZE;
15504  redo_crumbs[n_redo_crumbs++].data = p_recdes->data;
15505 
15506  /* Add CHN */
15507  redo_crumbs[n_redo_crumbs].length = OR_INT_SIZE;
15508  redo_crumbs[n_redo_crumbs++].data = p_recdes->data + chn_offset;
15509 
15510  /* Set data copy offset after the record header */
15511  data_copy_offset = OR_HEADER_SIZE (p_recdes->data);
15512  }
15513 
15514  /* Add record data - record may be skipped if the record is not big one */
15515  redo_crumbs[n_redo_crumbs].length = p_recdes->length - data_copy_offset;
15516  redo_crumbs[n_redo_crumbs++].data = p_recdes->data + data_copy_offset;
15517 
15518  /* Safe guard */
15519  assert (n_redo_crumbs <= HEAP_LOG_MVCC_INSERT_MAX_REDO_CRUMBS);
15520 
15521  /* Append redo crumbs; undo crumbs not necessary as the spage_delete physical operation uses the offset field of the
15522  * address */
15523  log_append_undoredo_crumbs (thread_p, RVHF_MVCC_INSERT, p_addr, 0, n_redo_crumbs, NULL, redo_crumbs);
15524 }
15525 
15526 /*
15527  * heap_rv_mvcc_redo_insert () - Redo the MVCC insertion of an object
15528  * return: int
15529  * rcv(in): Recovery structure
15530  *
15531  * Note: MVCC redo the insertion of an object at a specific location (OID).
15532  */
15533 int
15535 {
15536  INT16 slotid;
15537  RECDES recdes;
15538  int chn, sp_success;
15540  INT16 record_type;
15541  bool vacuum_status_change = false;
15542 
15543  assert (rcv->pgptr != NULL);
15544  assert (MVCCID_IS_NORMAL (rcv->mvcc_id));
15545 
15546  slotid = rcv->offset;
15547  if (slotid & HEAP_RV_FLAG_VACUUM_STATUS_CHANGE)
15548  {
15549  vacuum_status_change = true;
15550  }
15551  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
15552  assert (slotid > 0);
15553 
15554  record_type = *(INT16 *) rcv->data;
15555  if (record_type == REC_BIGONE)
15556  {
15557  /* no data header */
15558  HEAP_SET_RECORD (&recdes, rcv->length - sizeof (record_type), rcv->length - sizeof (record_type), REC_BIGONE,
15559  rcv->data + sizeof (record_type));
15560  }
15561  else
15562  {
15564  int repid_and_flags, offset, mvcc_flag, offset_size;
15565 
15566  offset = sizeof (record_type);
15567 
15568  repid_and_flags = OR_GET_INT (rcv->data + offset);
15569  offset += OR_INT_SIZE;
15570 
15571  chn = OR_GET_INT (rcv->data + offset);
15572  offset += OR_INT_SIZE;
15573 
15574  mvcc_flag = (char) ((repid_and_flags >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK);
15575 
15576  assert (!(mvcc_flag & OR_MVCC_FLAG_VALID_DELID));
15577 
15578  if ((repid_and_flags & OR_OFFSET_SIZE_FLAG) == OR_OFFSET_SIZE_1BYTE)
15579  {
15580  offset_size = OR_BYTE_SIZE;
15581  }
15582  else if ((repid_and_flags & OR_OFFSET_SIZE_FLAG) == OR_OFFSET_SIZE_2BYTE)
15583  {
15584  offset_size = OR_SHORT_SIZE;
15585  }
15586  else
15587  {
15588  offset_size = OR_INT_SIZE;
15589  }
15590 
15591  MVCC_SET_REPID (&mvcc_rec_header, repid_and_flags & OR_MVCC_REPID_MASK);
15592  MVCC_SET_FLAG (&mvcc_rec_header, mvcc_flag);
15594  MVCC_SET_CHN (&mvcc_rec_header, chn);
15595 
15597  PTR_ALIGN (data_buffer, MAX_ALIGNMENT));
15598  or_mvcc_add_header (&recdes, &mvcc_rec_header, repid_and_flags & OR_BOUND_BIT_FLAG, offset_size);
15599 
15600  memcpy (recdes.data + recdes.length, rcv->data + offset, rcv->length - offset);
15601  recdes.length += (rcv->length - offset);
15602  }
15603 
15604  sp_success = spage_insert_for_recovery (thread_p, rcv->pgptr, slotid, &recdes);
15605 
15606  if (sp_success != SP_SUCCESS)
15607  {
15608  /* Unable to redo insertion */
15609  assert_release (false);
15610  return ER_FAILED;
15611  }
15612 
15613  heap_page_rv_chain_update (thread_p, rcv->pgptr, rcv->mvcc_id, vacuum_status_change);
15614  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15615 
15616  return NO_ERROR;
15617 }
15618 
15619 /*
15620  * heap_rv_undo_insert () - Undo the insertion of an object.
15621  * return: int
15622  * rcv(in): Recovery structure
15623  *
15624  * Note: Delete an object for recovery purposes. The OID of the object
15625  * is reused since the object was never committed.
15626  */
15627 int
15629 {
15630  INT16 slotid;
15631 
15632  slotid = rcv->offset;
15633  /* Clear HEAP_RV_FLAG_VACUUM_STATUS_CHANGE */
15634  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
15635  (void) spage_delete_for_recovery (thread_p, rcv->pgptr, slotid);
15636  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15637 
15638  return NO_ERROR;
15639 }
15640 
15641 /*
15642  * heap_rv_redo_delete () - Redo the deletion of an object
15643  * return: int
15644  * rcv(in): Recovery structure
15645  *
15646  * Note: Redo the deletion of an object.
15647  * The OID of the object is not reuse since we don't know if the object was a
15648  * newly created object.
15649  */
15650 int
15652 {
15653  INT16 slotid;
15654 
15655  slotid = rcv->offset;
15656  (void) spage_delete (thread_p, rcv->pgptr, slotid);
15657  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15658 
15659  return NO_ERROR;
15660 }
15661 
15662 /*
15663  * heap_mvcc_log_delete () - Log normal MVCC heap delete operation (just
15664  * append delete MVCCID and next version OID).
15665  *
15666  * return : Void.
15667  * thread_p (in) : Thread entry.
15668  * p_addr (in) : Log address data.
15669  * rcvindex(in) : Index to recovery function
15670  */
15671 static void
15672 heap_mvcc_log_delete (THREAD_ENTRY * thread_p, LOG_DATA_ADDR * p_addr, LOG_RCVINDEX rcvindex)
15673 {
15674  char redo_data_buffer[OR_MVCCID_SIZE + MAX_ALIGNMENT];
15675  char *redo_data_p = PTR_ALIGN (redo_data_buffer, MAX_ALIGNMENT);
15676  char *ptr;
15677  int redo_data_size = 0;
15678  HEAP_PAGE_VACUUM_STATUS vacuum_status;
15679 
15680  assert (p_addr != NULL);
15682  || rcvindex == RVHF_MVCC_DELETE_OVERFLOW);
15683 
15684  if (LOG_IS_MVCC_HEAP_OPERATION (rcvindex))
15685  {
15686  vacuum_status = heap_page_get_vacuum_status (thread_p, p_addr->pgptr);
15687 
15688  heap_page_update_chain_after_mvcc_op (thread_p, p_addr->pgptr, logtb_get_current_mvccid (thread_p));
15689  if (heap_page_get_vacuum_status (thread_p, p_addr->pgptr) != vacuum_status)
15690  {
15691  /* Mark vacuum status change for recovery. */
15693  }
15694  }
15695 
15696  /* Prepare redo data. */
15697  ptr = redo_data_p;
15698 
15699  if (rcvindex != RVHF_MVCC_DELETE_REC_HOME)
15700  {
15701  /* MVCCID must be packed also, since it is not saved in log record structure. */
15703  redo_data_size += OR_MVCCID_SIZE;
15704  }
15705 
15706  assert ((ptr - redo_data_buffer) <= (int) sizeof (redo_data_buffer));
15707 
15708  /* Log append undo/redo crumbs */
15709  log_append_undoredo_data (thread_p, rcvindex, p_addr, 0, redo_data_size, NULL, redo_data_p);
15710 }
15711 
15712 /*
15713  * heap_rv_mvcc_undo_delete () - Undo the MVCC deletion of an object
15714  * return: int
15715  * rcv(in): Recovery structure
15716  */
15717 int
15719 {
15720  INT16 slotid;
15722  char data_buffer[IO_MAX_PAGE_SIZE + MAX_ALIGNMENT];
15723  RECDES rebuild_record;
15724 
15725  slotid = rcv->offset;
15726  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
15727  assert (slotid > 0);
15728 
15729  rebuild_record.data = PTR_ALIGN (data_buffer, MAX_ALIGNMENT);
15730  rebuild_record.area_size = DB_PAGESIZE;
15731  if (spage_get_record (thread_p, rcv->pgptr, slotid, &rebuild_record, COPY) != S_SUCCESS)
15732  {
15733  assert_release (false);
15734  return ER_FAILED;
15735  }
15736  assert (rebuild_record.type == REC_HOME || rebuild_record.type == REC_NEWHOME);
15737 
15738  if (or_mvcc_get_header (&rebuild_record, &mvcc_rec_header) != NO_ERROR)
15739  {
15740  assert_release (false);
15741  return ER_FAILED;
15742  }
15745 
15746  if (or_mvcc_set_header (&rebuild_record, &mvcc_rec_header) != NO_ERROR)
15747  {
15748  assert_release (false);
15749  return ER_FAILED;
15750  }
15751 
15752  if (spage_update (thread_p, rcv->pgptr, slotid, &rebuild_record) != SP_SUCCESS)
15753  {
15754  assert_release (false);
15755  return ER_FAILED;
15756  }
15757 
15758  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15759  return NO_ERROR;
15760 }
15761 
15762 /*
15763  * heap_rv_mvcc_undo_delete_overflow () - Undo MVCC delete of an overflow
15764  * record.
15765  *
15766  * return : Error code.
15767  * thread_p (in) : Thread entry.
15768  * rcv (in) : Recovery data.
15769  */
15770 int
15772 {
15773  MVCC_REC_HEADER mvcc_header;
15774 
15775  if (heap_get_mvcc_rec_header_from_overflow (rcv->pgptr, &mvcc_header, NULL) != NO_ERROR)
15776  {
15777  assert_release (false);
15778  return ER_FAILED;
15779  }
15780 
15781  /* All flags should be set. Overflow header should be set to maximum size */
15784 
15785  MVCC_SET_DELID (&mvcc_header, MVCCID_NULL);
15786 
15787  /* Change header. */
15788  if (heap_set_mvcc_rec_header_on_overflow (rcv->pgptr, &mvcc_header) != NO_ERROR)
15789  {
15790  assert_release (false);
15791  return ER_FAILED;
15792  }
15793 
15794  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15795  return NO_ERROR;
15796 }
15797 
15798 /*
15799  * heap_rv_mvcc_redo_delete_internal () - Internal function to be used by
15800  * heap_rv_mvcc_redo_delete_home and
15801  * heap_rv_mvcc_redo_delete_newhome.
15802  *
15803  * return : Error code.
15804  * thread_p (in) : Thread entry.
15805  * page (in) : Heap page.
15806  * slotid (in) : Recovered record slotid.
15807  * mvccid (in) : Delete MVCCID.
15808  */
15809 static int
15810 heap_rv_mvcc_redo_delete_internal (THREAD_ENTRY * thread_p, PAGE_PTR page, PGSLOTID slotid, MVCCID mvccid)
15811 {
15812  RECDES rebuild_record;
15813  char data_buffer[IO_MAX_PAGE_SIZE + MAX_ALIGNMENT];
15815 
15816  assert (page != NULL);
15817  assert (MVCCID_IS_NORMAL (mvccid));
15818 
15819  rebuild_record.data = PTR_ALIGN (data_buffer, MAX_ALIGNMENT);
15820  rebuild_record.area_size = DB_PAGESIZE;
15821 
15822  /* Get record. */
15823  if (spage_get_record (thread_p, page, slotid, &rebuild_record, COPY) != S_SUCCESS)
15824  {
15825  assert_release (false);
15826  return ER_FAILED;
15827  }
15828 
15829  /* Get MVCC header. */
15830  if (or_mvcc_get_header (&rebuild_record, &mvcc_rec_header) != NO_ERROR)
15831  {
15832  assert_release (false);
15833  return ER_FAILED;
15834  }
15835 
15836  /* Set delete MVCCID. */
15838  MVCC_SET_DELID (&mvcc_rec_header, mvccid);
15839 
15840  /* Change header. */
15841  if (or_mvcc_set_header (&rebuild_record, &mvcc_rec_header) != NO_ERROR)
15842  {
15843  assert_release (false);
15844  return ER_FAILED;
15845  }
15846 
15847  /* Update record in page. */
15848  if (spage_update (thread_p, page, slotid, &rebuild_record) != SP_SUCCESS)
15849  {
15850  assert_release (false);
15851  return ER_FAILED;
15852  }
15853 
15854  /* Success. */
15855  return NO_ERROR;
15856 }
15857 
15858 /*
15859  * heap_rv_mvcc_redo_delete_home () - Redo MVCC delete of REC_HOME record.
15860  *
15861  * return : Error code
15862  * thread_p (in) : Thread entry.
15863  * rcv (in) : Recovery data.
15864  */
15865 int
15867 {
15868  int error_code = NO_ERROR;
15869  int offset = 0;
15870  PGSLOTID slotid;
15871  bool vacuum_status_change = false;
15872 
15873  assert (rcv->pgptr != NULL);
15874  assert (MVCCID_IS_NORMAL (rcv->mvcc_id));
15875 
15876  slotid = rcv->offset;
15877  if (slotid & HEAP_RV_FLAG_VACUUM_STATUS_CHANGE)
15878  {
15879  vacuum_status_change = true;
15880  }
15881  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
15882  assert (slotid > 0);
15883 
15884  assert (offset == rcv->length);
15885 
15886  error_code = heap_rv_mvcc_redo_delete_internal (thread_p, rcv->pgptr, slotid, rcv->mvcc_id);
15887  if (error_code != NO_ERROR)
15888  {
15889  ASSERT_ERROR ();
15890  return error_code;
15891  }
15892  heap_page_rv_chain_update (thread_p, rcv->pgptr, rcv->mvcc_id, vacuum_status_change);
15893 
15894  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15895  return NO_ERROR;
15896 }
15897 
15898 /*
15899  * heap_rv_mvcc_redo_delete_overflow () - Redo MVCC delete of overflow record.
15900  *
15901  * return : Error code
15902  * thread_p (in) : Thread entry.
15903  * rcv (in) : Recovery data.
15904  */
15905 int
15907 {
15908  int offset = 0;
15909  MVCCID mvccid;
15910  MVCC_REC_HEADER mvcc_header;
15911 
15912  assert (rcv->pgptr != NULL);
15913 
15914  OR_GET_MVCCID (rcv->data + offset, &mvccid);
15916 
15917  assert (offset == rcv->length);
15918 
15919  if (heap_get_mvcc_rec_header_from_overflow (rcv->pgptr, &mvcc_header, NULL) != NO_ERROR)
15920  {
15921  assert_release (false);
15922  return ER_FAILED;
15923  }
15925 
15927  MVCC_SET_DELID (&mvcc_header, mvccid);
15928 
15929  /* Update MVCC header. */
15930  if (heap_set_mvcc_rec_header_on_overflow (rcv->pgptr, &mvcc_header) != NO_ERROR)
15931  {
15932  assert_release (false);
15933  return ER_FAILED;
15934  }
15935 
15936  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15937  return NO_ERROR;
15938 }
15939 
15940 /*
15941  * heap_rv_mvcc_redo_delete_newhome () - Redo MVCC delete of REC_NEWHOME
15942  * record.
15943  *
15944  * return : Error code
15945  * thread_p (in) : Thread entry.
15946  * rcv (in) : Recovery data.
15947  */
15948 int
15950 {
15951  int error_code = NO_ERROR;
15952  int offset = 0;
15953  MVCCID mvccid;
15954 
15955  assert (rcv->pgptr != NULL);
15956 
15957  OR_GET_MVCCID (rcv->data + offset, &mvccid);
15959 
15960 
15961  assert (offset == rcv->length);
15962 
15963  error_code = heap_rv_mvcc_redo_delete_internal (thread_p, rcv->pgptr, rcv->offset, mvccid);
15964  if (error_code != NO_ERROR)
15965  {
15966  ASSERT_ERROR ();
15967  return error_code;
15968  }
15969 
15970  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15971  return NO_ERROR;
15972 }
15973 
15974 /*
15975  * heap_rv_redo_mark_reusable_slot () - Marks a deleted slot as reusable; used
15976  * as a postponed log operation and a
15977  * REDO function
15978  * return: int
15979  * rcv(in): Recovery structure
15980  *
15981  * Note: Mark (during postponed operation execution)/Redo (during recovery)
15982  * the marking of a deleted slot as reusable.
15983  */
15984 int
15986 {
15987  INT16 slotid;
15988 
15989  slotid = rcv->offset;
15990  (void) spage_mark_deleted_slot_as_reusable (thread_p, rcv->pgptr, slotid);
15991  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
15992 
15993  return NO_ERROR;
15994 }
15995 
15996 /*
15997  * heap_rv_undo_delete () - Undo the deletion of an object
15998  * return: int
15999  * rcv(in): Recovery structure
16000  */
16001 int
16003 {
16004  INT16 slotid;
16005  INT16 recdes_type;
16006  int error_code;
16007 
16008  error_code = heap_rv_redo_insert (thread_p, rcv);
16009  if (error_code != NO_ERROR)
16010  {
16011  return error_code;
16012  }
16013 
16014  /* vacuum atomicity */
16015  recdes_type = *(INT16 *) (rcv->data);
16016  if (recdes_type == REC_NEWHOME)
16017  {
16018  slotid = rcv->offset;
16019  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
16020  error_code = vacuum_rv_check_at_undo (thread_p, rcv->pgptr, slotid, recdes_type);
16021  if (error_code != NO_ERROR)
16022  {
16023  assert_release (false);
16024  return ER_FAILED;
16025  }
16026  }
16027 
16028  return NO_ERROR;
16029 }
16030 
16031 /*
16032  * heap_rv_undo_update () - Undo the update of an object
16033  * return: int
16034  * rev(in): Recovery structure
16035  */
16036 int
16038 {
16039  INT16 recdes_type;
16040  int error_code;
16041 
16042  error_code = heap_rv_undoredo_update (thread_p, rcv);
16043  if (error_code != NO_ERROR)
16044  {
16045  ASSERT_ERROR ();
16046  return error_code;
16047  }
16048 
16049  /* vacuum atomicity */
16050  recdes_type = *(INT16 *) (rcv->data);
16051  if (recdes_type == REC_HOME || recdes_type == REC_NEWHOME)
16052  {
16053  INT16 slotid;
16054 
16055  slotid = rcv->offset;
16056  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
16057  error_code = vacuum_rv_check_at_undo (thread_p, rcv->pgptr, slotid, recdes_type);
16058  if (error_code != NO_ERROR)
16059  {
16060  assert_release (false);
16061  return error_code;
16062  }
16063  }
16064 
16065  return NO_ERROR;
16066 }
16067 
16068 /*
16069  * heap_rv_redo_update () - Redo the update of an object
16070  * return: int
16071  * rcv(in): Recovrery structure
16072  */
16073 int
16075 {
16076  return heap_rv_undoredo_update (thread_p, rcv);
16077 }
16078 
16079 /*
16080  * heap_rv_undoredo_update () - Recover an update either for undo or redo
16081  * return: int
16082  * rcv(in): Recovery structure
16083  */
16084 int
16086 {
16087  INT16 slotid;
16088  RECDES recdes;
16089  int sp_success;
16090 
16091  slotid = rcv->offset;
16092  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
16093  assert (slotid > 0);
16094 
16095  recdes.type = *(INT16 *) (rcv->data);
16096  recdes.data = (char *) (rcv->data) + sizeof (recdes.type);
16097  recdes.area_size = recdes.length = rcv->length - sizeof (recdes.type);
16098  if (recdes.area_size <= 0)
16099  {
16100  sp_success = SP_SUCCESS;
16101  }
16102  else
16103  {
16104  if (heap_update_physical (thread_p, rcv->pgptr, slotid, &recdes) != NO_ERROR)
16105  {
16106  assert_release (false);
16107  return ER_FAILED;
16108  }
16109  }
16110 
16111  return NO_ERROR;
16112 }
16113 
16114 /*
16115  * heap_rv_redo_reuse_page () - Redo the deletion of all objects in page for
16116  * reuse purposes
16117  * return: int
16118  * rcv(in): Recovery structure
16119  */
16120 int
16122 {
16123  VPID vpid;
16124  RECDES recdes;
16125  HEAP_CHAIN *chain; /* Chain to next and prev page */
16126  int sp_success;
16127  const bool is_header_page = ((rcv->offset != 0) ? true : false);
16128 
16129  (void) pgbuf_check_page_ptype (thread_p, rcv->pgptr, PAGE_HEAP);
16130 
16133 
16134  /* We ignore the return value. It should be true (objects were deleted) except for the scenario when the redo actions
16135  * are applied twice. */
16136  (void) heap_delete_all_page_records (thread_p, &vpid, rcv->pgptr);
16137 
16138  /* At here, do not consider the header of heap. Later redo the update of the header of heap at RVHF_STATS log. */
16139  if (!is_header_page)
16140  {
16141  sp_success = spage_get_record (thread_p, rcv->pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK);
16142  if (sp_success != SP_SUCCESS)
16143  {
16144  /* something went wrong. Unable to redo update class_oid */
16145  if (sp_success != SP_ERROR)
16146  {
16148  }
16149  assert (er_errid () != NO_ERROR);
16150  return er_errid ();
16151  }
16152 
16153  chain = (HEAP_CHAIN *) recdes.data;
16154  COPY_OID (&(chain->class_oid), (OID *) (rcv->data));
16155  chain->max_mvccid = MVCCID_NULL;
16156  chain->flags = 0;
16158  }
16159 
16160  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
16161 
16162  return NO_ERROR;
16163 }
16164 
16165 /*
16166  * heap_rv_redo_reuse_page_reuse_oid () - Redo the deletion of all objects in
16167  * a reusable oid heap page for reuse
16168  * purposes
16169  * return: int
16170  * rcv(in): Recovery structure
16171  */
16172 int
16174 {
16175  RECDES recdes;
16176  HEAP_CHAIN *chain; /* Chain to next and prev page */
16177  int sp_success;
16178  const bool is_header_page = ((rcv->offset != 0) ? true : false);
16179 
16180  (void) heap_reinitialize_page (thread_p, rcv->pgptr, is_header_page);
16181 
16182  (void) pgbuf_set_page_ptype (thread_p, rcv->pgptr, PAGE_HEAP);
16183 
16184  /* At here, do not consider the header of heap. Later redo the update of the header of heap at RVHF_STATS log. */
16185  if (!is_header_page)
16186  {
16187  sp_success = spage_get_record (thread_p, rcv->pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK);
16188  if (sp_success != SP_SUCCESS)
16189  {
16190  /* something went wrong. Unable to redo update class_oid */
16191  if (sp_success != SP_ERROR)
16192  {
16194  }
16195  assert (er_errid () != NO_ERROR);
16196  return er_errid ();
16197  }
16198 
16199  chain = (HEAP_CHAIN *) recdes.data;
16200  COPY_OID (&(chain->class_oid), (OID *) (rcv->data));
16201  chain->max_mvccid = MVCCID_NULL;
16202  chain->flags = 0;
16204  }
16205 
16206  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
16207 
16208  return NO_ERROR;
16209 }
16210 
/*
 * heap_rv_dump_reuse_page () - Dump reuse page
 *   return: void
 *   fp(in): Output file stream
 *   ignore_length(in): Length of Recovery Data (unused)
 *   ignore_data(in): The data being logged (unused)
 *
 * Note: Dump information about reuse of page.
 */
void
heap_rv_dump_reuse_page (FILE * fp, int ignore_length, void *ignore_data)
{
  /* Fixed message; the recovery payload is not interpreted here. */
  fputs ("Delete all objects in page for reuse purposes of page\n", fp);
}
16224 
16225 /*
16226  * xheap_get_class_num_objects_pages () -
16227  * return: NO_ERROR
16228  * hfid(in):
16229  * approximation(in):
16230  * nobjs(in):
16231  * npages(in):
16232  */
16233 int
16234 xheap_get_class_num_objects_pages (THREAD_ENTRY * thread_p, const HFID * hfid, int approximation, int *nobjs,
16235  int *npages)
16236 {
16237  int length, num;
16238  int ret;
16239 
16240  assert (!HFID_IS_NULL (hfid));
16241 
16242  if (approximation)
16243  {
16244  num = heap_estimate (thread_p, hfid, npages, nobjs, &length);
16245  }
16246  else
16247  {
16248  num = heap_get_num_objects (thread_p, hfid, npages, nobjs, &length);
16249  }
16250 
16251  if (num < 0)
16252  {
16253  return (((ret = er_errid ()) == NO_ERROR) ? ER_FAILED : ret);
16254  }
16255 
16256  return NO_ERROR;
16257 }
16258 
16259 /*
16260  * xheap_has_instance () -
16261  * return:
16262  * hfid(in):
16263  * class_oid(in):
16264  * has_visible_instance(in): true if we need to check for a visible record
16265  */
16266 int
16267 xheap_has_instance (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, int has_visible_instance)
16268 {
16269  OID oid;
16270  HEAP_SCANCACHE scan_cache;
16271  RECDES recdes;
16272  SCAN_CODE r;
16274 
16275  OID_SET_NULL (&oid);
16276 
16277  if (has_visible_instance)
16278  {
16280  if (mvcc_snapshot == NULL)
16281  {
16282  return ER_FAILED;
16283  }
16284  }
16285  if (heap_scancache_start (thread_p, &scan_cache, hfid, class_oid, true, false, mvcc_snapshot) != NO_ERROR)
16286  {
16287  return ER_FAILED;
16288  }
16289 
16290  recdes.data = NULL;
16291  r = heap_first (thread_p, hfid, class_oid, &oid, &recdes, &scan_cache, true);
16292  heap_scancache_end (thread_p, &scan_cache);
16293 
16294  if (r == S_ERROR)
16295  {
16296  return ER_FAILED;
16297  }
16298  else if (r == S_DOESNT_EXIST || r == S_END)
16299  {
16300  return 0;
16301  }
16302  else
16303  {
16304  return 1;
16305  }
16306 }
16307 
16308 /*
16309  * heap_get_class_repr_id () -
16310  * return:
16311  * class_oid(in):
16312  */
16313 REPR_ID
16314 heap_get_class_repr_id (THREAD_ENTRY * thread_p, OID * class_oid)
16315 {
16316  OR_CLASSREP *rep = NULL;
16317  REPR_ID id;
16318  int idx_incache = -1;
16319 
16320  if (!class_oid || !idx_incache)
16321  {
16322  return 0;
16323  }
16324 
16325  rep = heap_classrepr_get (thread_p, class_oid, NULL, NULL_REPRID, &idx_incache);
16326  if (rep == NULL)
16327  {
16328  return 0;
16329  }
16330 
16331  id = rep->id;
16332  heap_classrepr_free_and_init (rep, &idx_incache);
16333 
16334  return id;
16335 }
16336 
16337 /*
16338  * heap_set_autoincrement_value () -
16339  * return: NO_ERROR, or ER_code
16340  * attr_info(in):
16341  * scan_cache(in):
16342  * is_set(out): 1 if at least one autoincrement value has been set
16343  */
16344 int
16346  int *is_set)
16347 {
16348  int i, idx_in_cache;
16349  char *classname = NULL;
16350  char *attr_name = NULL;
16351  RECDES recdes; /* Used to obtain attribute name */
16352  char serial_name[AUTO_INCREMENT_SERIAL_NAME_MAX_LENGTH];
16353  HEAP_ATTRVALUE *value;
16354  DB_VALUE dbvalue_numeric, *dbvalue, key_val;
16355  OR_ATTRIBUTE *att;
16356  OID serial_class_oid;
16358  OR_CLASSREP *classrep;
16359  BTID serial_btid;
16360  DB_DATA_STATUS data_stat;
16361  HEAP_SCANCACHE local_scan_cache;
16362  bool use_local_scan_cache = false;
16363  int ret = NO_ERROR;
16364  int alloced_string = 0;
16365  char *string = NULL;
16366 
16367  if (!attr_info || !scan_cache)
16368  {
16369  return ER_FAILED;
16370  }
16371 
16372  *is_set = 0;
16373 
16374  recdes.data = NULL;
16375  recdes.area_size = 0;
16376 
16377  for (i = 0; i < attr_info->num_values; i++)
16378  {
16379  value = &attr_info->values[i];
16380  dbvalue = &value->dbvalue;
16381  att = &attr_info->last_classrepr->attributes[i];
16382 
16383  if (att->is_autoincrement && (value->state == HEAP_UNINIT_ATTRVALUE))
16384  {
16385  if (OID_ISNULL (&(att->auto_increment.serial_obj)))
16386  {
16387  memset (serial_name, '\0', sizeof (serial_name));
16388  recdes.data = NULL;
16389  recdes.area_size = 0;
16390 
16391  if (scan_cache->cache_last_fix_page == false)
16392  {
16393  scan_cache = &local_scan_cache;
16394  (void) heap_scancache_quick_start_root_hfid (thread_p, scan_cache);
16395  use_local_scan_cache = true;
16396  }
16397 
16398  if (heap_get_class_record (thread_p, &(attr_info->class_oid), &recdes, scan_cache, PEEK) != S_SUCCESS)
16399  {
16400  ret = ER_FAILED;
16401  goto exit_on_error;
16402  }
16403 
16404  if (heap_get_class_name (thread_p, &(att->classoid), &classname) != NO_ERROR || classname == NULL)
16405  {
16406  ASSERT_ERROR_AND_SET (ret);
16407  goto exit_on_error;
16408  }
16409 
16410  string = NULL;
16411  alloced_string = 0;
16412 
16413  ret = or_get_attrname (&recdes, att->id, &string, &alloced_string);
16414  if (ret != NO_ERROR)
16415  {
16416  ASSERT_ERROR ();
16417  goto exit_on_error;
16418  }
16419 
16420  attr_name = string;
16421  if (attr_name == NULL)
16422  {
16423  ret = ER_FAILED;
16424  goto exit_on_error;
16425  }
16426 
16427  SET_AUTO_INCREMENT_SERIAL_NAME (serial_name, classname, attr_name);
16428 
16429  if (string != NULL && alloced_string == 1)
16430  {
16431  db_private_free_and_init (thread_p, string);
16432  }
16433 
16434  free_and_init (classname);
16435 
16436  if (db_make_varchar (&key_val, DB_MAX_IDENTIFIER_LENGTH, serial_name, (int) strlen (serial_name),
16438  {
16439  ret = ER_FAILED;
16440  goto exit_on_error;
16441  }
16442 
16443  status = xlocator_find_class_oid (thread_p, CT_SERIAL_NAME, &serial_class_oid, NULL_LOCK);
16445  {
16446  ret = ER_FAILED;
16447  goto exit_on_error;
16448  }
16449 
16450  classrep = heap_classrepr_get (thread_p, &serial_class_oid, NULL, NULL_REPRID, &idx_in_cache);
16451  if (classrep == NULL)
16452  {
16453  ret = ER_FAILED;
16454  goto exit_on_error;
16455  }
16456 
16457  if (classrep->indexes)
16458  {
16459  BTREE_SEARCH search_result;
16460  OID serial_oid;
16461 
16462  BTID_COPY (&serial_btid, &(classrep->indexes[0].btid));
16463  search_result =
16464  xbtree_find_unique (thread_p, &serial_btid, S_SELECT, &key_val, &serial_class_oid, &serial_oid,
16465  false);
16466  heap_classrepr_free_and_init (classrep, &idx_in_cache);
16467  if (search_result != BTREE_KEY_FOUND)
16468  {
16469  ret = ER_FAILED;
16470  goto exit_on_error;
16471  }
16472 
16473  assert (!OID_ISNULL (&serial_oid));
16474  ATOMIC_CAS_64 ((INT64 *) (&att->auto_increment.serial_obj), *(INT64 *) (&oid_Null_oid),
16475  *(INT64 *) (&serial_oid));
16476  }
16477  else
16478  {
16479  heap_classrepr_free_and_init (classrep, &idx_in_cache);
16480  ret = ER_FAILED;
16481  goto exit_on_error;
16482  }
16483  }
16484 
16485  if ((att->type == DB_TYPE_SHORT) || (att->type == DB_TYPE_INTEGER) || (att->type == DB_TYPE_BIGINT))
16486  {
16487  if (xserial_get_next_value (thread_p, &dbvalue_numeric, &att->auto_increment.serial_obj, 0, /* no cache */
16488  1, /* generate one value */
16489  GENERATE_AUTO_INCREMENT, false) != NO_ERROR)
16490  {
16491  ret = ER_FAILED;
16492  goto exit_on_error;
16493  }
16494 
16495  if (numeric_db_value_coerce_from_num (&dbvalue_numeric, dbvalue, &data_stat) != NO_ERROR)
16496  {
16497  ret = ER_FAILED;
16498  goto exit_on_error;
16499  }
16500  }
16501  else if (att->type == DB_TYPE_NUMERIC)
16502  {
16503  if (xserial_get_next_value (thread_p, dbvalue, &att->auto_increment.serial_obj, 0, /* no cache */
16504  1, /* generate one value */
16505  GENERATE_AUTO_INCREMENT, false) != NO_ERROR)
16506  {
16507  ret = ER_FAILED;
16508  goto exit_on_error;
16509  }
16510  }
16511 
16512  *is_set = 1;
16513  value->state = HEAP_READ_ATTRVALUE;
16514  }
16515  }
16516 
16517  if (use_local_scan_cache)
16518  {
16519  heap_scancache_end (thread_p, scan_cache);
16520  }
16521 
16522  return ret;
16523 
16524 exit_on_error:
16525  if (classname != NULL)
16526  {
16527  free_and_init (classname);
16528  }
16529 
16530  if (use_local_scan_cache)
16531  {
16532  heap_scancache_end (thread_p, scan_cache);
16533  }
16534  return ret;
16535 }
16536 
16537 /*
16538  * heap_attrinfo_set_uninitialized_global () -
16539  * return: NO_ERROR
16540  * inst_oid(in):
16541  * recdes(in):
16542  * attr_info(in):
16543  */
16544 int
16546  HEAP_CACHE_ATTRINFO * attr_info)
16547 {
16548  if (attr_info == NULL)
16549  {
16550  return ER_FAILED;
16551  }
16552 
16553  return heap_attrinfo_set_uninitialized (thread_p, inst_oid, recdes, attr_info);
16554 }
16555 
16556 /*
16557  * heap_get_hfid_from_class_oid () - get HFID from class oid
16558  * return: error_code
16559  * class_oid(in): class oid
16560  * hfid(out): the resulting hfid
16561  */
16562 int
16563 heap_get_hfid_from_class_oid (THREAD_ENTRY * thread_p, const OID * class_oid, HFID * hfid)
16564 {
16565  int error_code = NO_ERROR;
16566 
16567  error_code = heap_hfid_cache_get (thread_p, class_oid, hfid, NULL);
16568  if (error_code != NO_ERROR)
16569  {
16570  ASSERT_ERROR ();
16571  return error_code;
16572  }
16573 
16574  return error_code;
16575 }
16576 
16577 /*
16578  * heap_get_hfid_and_file_type_from_class_oid () - get HFID and file type for class.
16579  *
16580  * return : error code
16581  * thread_p (in) : thread entry
16582  * class_oid (in) : class OID
16583  * hfid_out (out) : output heap file identifier
16584  * ftype_out (out) : output heap file type
16585  */
16586 int
16587 heap_get_hfid_and_file_type_from_class_oid (THREAD_ENTRY * thread_p, const OID * class_oid, HFID * hfid_out,
16588  FILE_TYPE * ftype_out)
16589 {
16590  int error_code = NO_ERROR;
16591 
16592  error_code = heap_hfid_cache_get (thread_p, class_oid, hfid_out, ftype_out);
16593  if (error_code != NO_ERROR)
16594  {
16595  ASSERT_ERROR_AND_SET (error_code);
16596  return error_code;
16597  }
16598 
16599  return error_code;
16600 }
16601 
16602 /*
16603  * heap_compact_pages () - compact all pages from hfid of specified class OID
16604  * return: error_code
 * class_oid(in): the class oid
16606  */
int
heap_compact_pages (THREAD_ENTRY * thread_p, OID * class_oid)
{
  int ret = NO_ERROR;
  VPID vpid;			/* page currently being processed */
  VPID next_vpid;		/* successor page in the heap chain */
  LOG_DATA_ADDR addr;		/* log address used to mark pages as not-logged */
  HFID hfid;			/* heap file of the class */
  PGBUF_WATCHER pg_watcher;
  PGBUF_WATCHER old_pg_watcher;

  if (class_oid == NULL)
    {
      /* NOTE(review): the error handling for a NULL class_oid (presumably a
       * "return ER_FAILED;") is elided in this extract — verify against the
       * repository. */
    }

  /* hold an intention-share lock on the class while its HFID is resolved */
  if (lock_object (thread_p, class_oid, oid_Root_class_oid, IS_LOCK, LK_UNCOND_LOCK) != LK_GRANTED)
    {
      return ER_FAILED;
    }

  ret = heap_get_hfid_from_class_oid (thread_p, class_oid, &hfid);
  if (ret != NO_ERROR || HFID_IS_NULL (&hfid))
    {
      /* no heap file to compact; release the class lock and bail out */
      lock_unlock_object (thread_p, class_oid, oid_Root_class_oid, IS_LOCK, true);
      return ret;
    }

  /* NOTE(review): the matching PGBUF_INIT_WATCHER for pg_watcher appears to
   * be elided just above this line in this extract — verify. */
  PGBUF_INIT_WATCHER (&old_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, &hfid);

  addr.vfid = &hfid.vfid;
  addr.pgptr = NULL;
  addr.offset = 0;

  /* start from the heap header page */
  vpid.volid = hfid.vfid.volid;
  vpid.pageid = hfid.hpgid;

  if (pgbuf_ordered_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_READ, &pg_watcher) != NO_ERROR)
    {
      lock_unlock_object (thread_p, class_oid, oid_Root_class_oid, IS_LOCK, true);
      ret = ER_FAILED;
      goto exit_on_error;
    }

  (void) pgbuf_check_page_ptype (thread_p, pg_watcher.pgptr, PAGE_HEAP);

  /* the class lock was only needed to resolve the HFID safely */
  lock_unlock_object (thread_p, class_oid, oid_Root_class_oid, IS_LOCK, true);

  /* skip header page */
  ret = heap_vpid_next (thread_p, &hfid, pg_watcher.pgptr, &next_vpid);
  if (ret != NO_ERROR)
    {
      goto exit_on_error;
    }
  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);

  /* Walk the page chain; each page is compacted in place, and the previous
   * page is kept fixed (via old_pg_watcher) until the next one is fetched,
   * preserving the ordered-fix protocol. */
  while (!VPID_ISNULL (&next_vpid))
    {
      vpid = next_vpid;
      pg_watcher.pgptr =
      /* NOTE(review): the page fetch call assigned here is elided in this
       * extract (one source line missing) — verify against the repository. */
      if (old_pg_watcher.pgptr != NULL)
	{
	  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
	}
      if (pg_watcher.pgptr == NULL)
	{
	  ret = ER_FAILED;
	  goto exit_on_error;
	}

      ret = heap_vpid_next (thread_p, &hfid, pg_watcher.pgptr, &next_vpid);
      if (ret != NO_ERROR)
	{
	  pgbuf_ordered_unfix (thread_p, &pg_watcher);
	  goto exit_on_error;
	}

      if (spage_compact (thread_p, pg_watcher.pgptr) != NO_ERROR)
	{
	  pgbuf_ordered_unfix (thread_p, &pg_watcher);
	  ret = ER_FAILED;
	  goto exit_on_error;
	}

      /* compaction only rearranges slots physically; no redo log is needed */
      addr.pgptr = pg_watcher.pgptr;
      log_skip_logging (thread_p, &addr);
      pgbuf_set_dirty (thread_p, pg_watcher.pgptr, DONT_FREE);
      pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
    }

  if (old_pg_watcher.pgptr != NULL)
    {
      pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
    }
  assert (pg_watcher.pgptr == NULL);

  return ret;

exit_on_error:

  /* release whichever watchers are still holding pages */
  if (pg_watcher.pgptr != NULL)
    {
      pgbuf_ordered_unfix (thread_p, &pg_watcher);
    }
  if (old_pg_watcher.pgptr != NULL)
    {
      pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
    }

  return ret;
}
16720 
16721 /*
16722  * heap_classrepr_dump_all () - dump all representations belongs to a class
16723  * return: none
16724  * fp(in): file pointer to print out
16725  * class_oid(in): class oid to be dumped
16726  */
16727 void
16728 heap_classrepr_dump_all (THREAD_ENTRY * thread_p, FILE * fp, OID * class_oid)
16729 {
16730  RECDES peek_recdes;
16731  HEAP_SCANCACHE scan_cache;
16732  OR_CLASSREP **rep_all;
16733  int count, i;
16734  char *classname;
16735  bool need_free_classname = false;
16736 
16737  if (heap_get_class_name (thread_p, class_oid, &classname) != NO_ERROR || classname == NULL)
16738  {
16739  classname = (char *) "unknown";
16740  er_clear ();
16741  }
16742  else
16743  {
16744  need_free_classname = true;
16745  }
16746 
16747  heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);
16748 
16749  if (heap_get_class_record (thread_p, class_oid, &peek_recdes, &scan_cache, PEEK) == S_SUCCESS)
16750  {
16751  rep_all = or_get_all_representation (&peek_recdes, true, &count);
16752  fprintf (fp, "*** Dumping representations of class %s\n Classname = %s, Class-OID = %d|%d|%d, #Repr = %d\n",
16753  classname, classname, (int) class_oid->volid, class_oid->pageid, (int) class_oid->slotid, count);
16754 
16755  for (i = 0; i < count; i++)
16756  {
16757  assert (rep_all[i] != NULL);
16758  heap_classrepr_dump (thread_p, fp, class_oid, rep_all[i]);
16759  or_free_classrep (rep_all[i]);
16760  }
16761 
16762  fprintf (fp, "\n*** End of dump.\n");
16763  free_and_init (rep_all);
16764  }
16765 
16766  heap_scancache_end (thread_p, &scan_cache);
16767 
16768  if (need_free_classname)
16769  {
16770  free_and_init (classname);
16771  }
16772 }
16773 
16774 /*
16775  * heap_get_btid_from_index_name () - gets the BTID of an index using its name
16776  * and OID of class
16777  *
16778  * return: NO_ERROR, or error code
16779  * thread_p(in) : thread context
16780  * p_class_oid(in): OID of class
16781  * index_name(in) : name of index
16782  * p_found_btid(out): the BTREE ID of index
16783  *
16784  * Note : the 'p_found_btid' argument must be a pointer to a BTID value,
16785  * the found BTID is 'BTID_COPY-ed' into it.
16786  * Null arguments are not allowed.
16787  * If an index name is not found, the 'p_found_btid' is returned as
16788  * NULL BTID and no error is set.
16789  *
16790  */
16791 int
16792 heap_get_btid_from_index_name (THREAD_ENTRY * thread_p, const OID * p_class_oid, const char *index_name,
16793  BTID * p_found_btid)
16794 {
16795  int error = NO_ERROR;
16796  int classrepr_cacheindex = -1;
16797  int idx_cnt;
16798  OR_CLASSREP *classrepr = NULL;
16799  OR_INDEX *curr_index = NULL;
16800 
16801  assert (p_found_btid != NULL);
16802  assert (p_class_oid != NULL);
16803  assert (index_name != NULL);
16804 
16805  BTID_SET_NULL (p_found_btid);
16806 
16807  /* get the BTID associated from the index name : the only structure containing this info is OR_CLASSREP */
16808 
16809  /* get class representation */
16810  classrepr = heap_classrepr_get (thread_p, (OID *) p_class_oid, NULL, NULL_REPRID, &classrepr_cacheindex);
16811 
16812  if (classrepr == NULL)
16813  {
16814  error = er_errid ();
16815  if (error == NO_ERROR)
16816  {
16817  assert (error != NO_ERROR);
16818  error = ER_FAILED;
16819  }
16820  goto exit;
16821  }
16822 
16823  /* iterate through indexes looking for index name */
16824  for (idx_cnt = 0, curr_index = classrepr->indexes; idx_cnt < classrepr->n_indexes; idx_cnt++, curr_index++)
16825  {
16826  if (curr_index == NULL)
16827  {
16828  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_UNEXPECTED, 1, "Bad index information in class representation.");
16829  error = ER_UNEXPECTED;
16830  goto exit_cleanup;
16831  }
16832 
16833  if (intl_identifier_casecmp (curr_index->btname, index_name) == 0)
16834  {
16835  BTID_COPY (p_found_btid, &(curr_index->btid));
16836  break;
16837  }
16838  }
16839 
16840 exit_cleanup:
16841  if (classrepr)
16842  {
16843  heap_classrepr_free_and_init (classrepr, &classrepr_cacheindex);
16844  }
16845 
16846 exit:
16847  return error;
16848 }
16849 
16850 /*
 * heap_object_upgrade_domain - upgrades a single attribute in an instance from
16852  * the domain of current representation to the
16853  * domain of the last representation.
16854  *
 * return: error code, NO_ERROR if no error occurred
16856  * thread_p(in) : thread context
16857  * upd_scancache(in): scan context
 * attr_info(in): attribute info structure
16859  * oid(in): the oid of the object to process
16860  * att_id(in): attribute id within the class (same as in schema)
16861  *
16862  * Note : this function is used in ALTER CHANGE (with type change syntax)
16863  */
int
/* NOTE(review): the opening signature line — presumably
 * "heap_object_upgrade_domain (THREAD_ENTRY * thread_p, HEAP_SCANCACHE *
 * upd_scancache, HEAP_CACHE_ATTRINFO * attr_info," per the header comment
 * and the body's usage — is elided in this extract; verify. */
			    OID * oid, const ATTR_ID att_id)
{
  int i = 0, error = NO_ERROR;
  HEAP_ATTRVALUE *value = NULL;
  int force_count = 0, updated_n_attrs_id = 0;
  ATTR_ID atts_id[1] = { 0 };	/* exactly one attribute is upgraded per call */
  DB_VALUE orig_value;

  db_make_null (&orig_value);

  /* defensive check on required arguments */
  if (upd_scancache == NULL || attr_info == NULL || oid == NULL)
    {
      error = ER_UNEXPECTED;
      er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error, 1, "Unexpected NULL arguments.");
      goto exit;
    }

  /* Locate the attribute identified by att_id and coerce its current value
   * to the domain of the latest class representation. */
  for (i = 0, value = attr_info->values; i < attr_info->num_values; i++, value++)
    {
      TP_DOMAIN *dest_dom = value->last_attrepr->domain;
      bool log_warning = false;
      int warning_code = NO_ERROR;
      DB_TYPE dest_type;
      DB_TYPE src_type = DB_VALUE_DOMAIN_TYPE (&(value->dbvalue));
      int curr_prec = 0;
      int dest_prec = 0;

      dest_type = TP_DOMAIN_TYPE (dest_dom);

      if (att_id != value->attrid)
	{
	  continue;
	}

      /* compute the effective precision of the current (source) value */
      if (QSTR_IS_BIT (src_type))
	{
	  curr_prec = db_get_string_length (&(value->dbvalue));
	}
      else if (QSTR_IS_ANY_CHAR (src_type))
	{
	  if (TP_DOMAIN_CODESET (dest_dom) == INTL_CODESET_RAW_BYTES)
	    {
	      /* raw-bytes destination: precision counts bytes, not characters */
	      curr_prec = db_get_string_size (&(value->dbvalue));
	    }
	  else if (!DB_IS_NULL (&(value->dbvalue)))
	    {
	      curr_prec = db_get_string_length (&(value->dbvalue));
	    }
	  else
	    {
	      /* NULL value: treat as fitting the destination precision */
	      curr_prec = dest_dom->precision;
	    }
	}

      dest_prec = dest_dom->precision;

      if (QSTR_IS_ANY_CHAR_OR_BIT (src_type) && QSTR_IS_ANY_CHAR_OR_BIT (dest_type))
	{
	  /* check phase of ALTER TABLE .. CHANGE should not allow changing the domains from one flavour to another : */
	  assert ((QSTR_IS_CHAR (src_type) && QSTR_IS_CHAR (dest_type))
		  || (!QSTR_IS_CHAR (src_type) && !QSTR_IS_CHAR (dest_type)));

	  assert ((QSTR_IS_NATIONAL_CHAR (src_type) && QSTR_IS_NATIONAL_CHAR (dest_type))
		  || (!QSTR_IS_NATIONAL_CHAR (src_type) && !QSTR_IS_NATIONAL_CHAR (dest_type)));

	  assert ((QSTR_IS_BIT (src_type) && QSTR_IS_BIT (dest_type))
		  || (!QSTR_IS_BIT (src_type) && !QSTR_IS_BIT (dest_type)));

	  /* check string truncation */
	  if (dest_prec < curr_prec)
	    {
	      {
		/* NOTE(review): the strict-mode condition and its
		 * error-setting statements are elided in this extract. */
		  goto exit;
	      }
	      else
		{
		  /* allow truncation in cast, just warning */
		  log_warning = true;
		  warning_code = ER_QPROC_SIZE_STRING_TRUNCATED;
		}
	    }
	}

      /* keep a copy of the original value for the NULL-preservation check */
      error = pr_clone_value (&(value->dbvalue), &orig_value);
      if (error != NO_ERROR)
	{
	  goto exit;
	}

      if (TP_IS_CHAR_TYPE (TP_DOMAIN_TYPE (dest_dom))
	  && !(TP_IS_CHAR_TYPE (src_type) || src_type == DB_TYPE_ENUMERATION)
	{
	  /* If destination is char/varchar, we need to first cast the value to a string with no precision, then to
	   * destination type with the desired precision. */
	  TP_DOMAIN *string_dom;
	  if (TP_DOMAIN_TYPE (dest_dom) == DB_TYPE_NCHAR || TP_DOMAIN_TYPE (dest_dom) == DB_TYPE_VARNCHAR)
	    {
	      /* NOTE(review): the assignment of the national-string domain
	       * to string_dom is elided in this extract. */
	    }
	  else
	    {
	      /* NOTE(review): the assignment of the plain string domain to
	       * string_dom is elided in this extract. */
	    }
	  error = db_value_coerce (&(value->dbvalue), &(value->dbvalue), string_dom);
	}

      if (error == NO_ERROR)
	{
	  /* the actual coercion to the destination domain */
	  error = db_value_coerce (&(value->dbvalue), &(value->dbvalue), dest_dom);
	}
      if (error != NO_ERROR)
	{
	  /* coercion failed: substitute min/max/default of the destination
	   * domain, depending on overflow direction */
	  bool set_default_value = false;
	  bool set_min_value = false;
	  bool set_max_value = false;

	  {
	    /* NOTE(review): the condition guarding this early exit (likely
	     * the strict cast-failure mode) is elided in this extract. */
	      goto exit;
	  }

	  if (error == ER_IT_DATA_OVERFLOW)
	    {
	      int is_positive = -1;	/* -1:UNKNOWN, 0:negative, 1:positive */

	      /* determine sign of orginal value: */
	      switch (src_type)
		{
		case DB_TYPE_INTEGER:
		  is_positive = ((db_get_int (&value->dbvalue) >= 0) ? 1 : 0);
		  break;
		case DB_TYPE_SMALLINT:
		  is_positive = ((db_get_short (&value->dbvalue) >= 0) ? 1 : 0);
		  break;
		case DB_TYPE_BIGINT:
		  is_positive = ((db_get_bigint (&value->dbvalue) >= 0) ? 1 : 0);
		  break;
		case DB_TYPE_FLOAT:
		  is_positive = ((db_get_float (&value->dbvalue) >= 0) ? 1 : 0);
		  break;
		case DB_TYPE_DOUBLE:
		  is_positive = ((db_get_double (&value->dbvalue) >= 0) ? 1 : 0);
		  break;
		case DB_TYPE_NUMERIC:
		  is_positive = numeric_db_value_is_positive (&value->dbvalue);
		  break;
		case DB_TYPE_MONETARY:
		  is_positive = ((db_get_monetary (&value->dbvalue)->amount >= 0) ? 1 : 0);
		  break;

		case DB_TYPE_CHAR:
		case DB_TYPE_VARCHAR:
		case DB_TYPE_NCHAR:
		case DB_TYPE_VARNCHAR:
		  {
		    char *str = db_get_string (&(value->dbvalue));
		    char *str_end = str + db_get_string_length (&(value->dbvalue));
		    char *p = NULL;

		    /* get the sign in the source string; look directly into the buffer string, no copy */
		    p = str;
		    while (char_isspace (*p) && p < str_end)
		      {
			p++;
		      }

		    is_positive = ((p < str_end && (*p) == '-') ? 0 : 1);
		    break;
		  }

		default:
		  is_positive = -1;
		  break;
		}

	      if (is_positive == 1)
		{
		  set_max_value = true;
		}
	      else if (is_positive == 0)
		{
		  set_min_value = true;
		}
	      else
		{
		  set_default_value = true;
		}
	    }
	  else
	    {
	      /* non-overflow coercion failure: fall back to default */
	      set_default_value = true;
	    }
	  /* clear the error */
	  er_clear ();

	  log_warning = true;

	  /* the casted value will be overwritten, so a clear is needed, here */
	  pr_clear_value (&(value->dbvalue));

	  if (set_max_value)
	    {
	      /* set max value of destination domain */
	      error =
		db_value_domain_max (&(value->dbvalue), dest_type, dest_prec, dest_dom->scale, dest_dom->codeset,
				     dest_dom->collation_id, &dest_dom->enumeration);
	      if (error != NO_ERROR)
		{
		  /* this should not happen */
		  goto exit;
		}

	      warning_code = ER_ALTER_CHANGE_CAST_FAILED_SET_MAX;
	    }
	  else if (set_min_value)
	    {
	      /* set min value of destination domain */
	      error =
		db_value_domain_min (&(value->dbvalue), dest_type, dest_prec, dest_dom->scale, dest_dom->codeset,
				     dest_dom->collation_id, &dest_dom->enumeration);
	      if (error != NO_ERROR)
		{
		  /* this should not happen */
		  goto exit;
		}
	      warning_code = ER_ALTER_CHANGE_CAST_FAILED_SET_MIN;
	    }
	  else
	    {
	      assert (set_default_value == true);

	      /* set default value of destination domain */
	      error =
		db_value_domain_default (&(value->dbvalue), dest_type, dest_prec, dest_dom->scale, dest_dom->codeset,
					 dest_dom->collation_id, &dest_dom->enumeration);
	      if (error != NO_ERROR)
		{
		  /* this should not happen */
		  goto exit;
		}
	      /* NOTE(review): the warning_code assignment (presumably
	       * ER_ALTER_CHANGE_CAST_FAILED_SET_DEFAULT) is elided here. */
	    }
	}

      /* a non-NULL input must never have degraded to NULL */
      if (!DB_IS_NULL (&orig_value))
	{
	  assert (!DB_IS_NULL (&(value->dbvalue)));
	}

      if (log_warning)
	{
	  assert (warning_code != NO_ERROR);

	  /* Since we don't like to bother callers with the following warning which is just for a logging, it will be
	   * poped once it is set. */
	  er_stack_push ();

	  if (warning_code == ER_QPROC_SIZE_STRING_TRUNCATED)
	    {
	      er_set (ER_WARNING_SEVERITY, ARG_FILE_LINE, warning_code, 1, "ALTER TABLE .. CHANGE");
	    }
	  else
	    {
	      er_set (ER_WARNING_SEVERITY, ARG_FILE_LINE, warning_code, 0);
	    }

	  /* forget the warning */
	  er_stack_pop ();
	}

      /* mark the attribute as modified so the flush below writes it */
      value->state = HEAP_WRITTEN_ATTRVALUE;
      atts_id[updated_n_attrs_id] = value->attrid;
      updated_n_attrs_id++;

      break;
    }

  /* exactly one attribute should be changed */
  assert (updated_n_attrs_id == 1);

  if (updated_n_attrs_id != 1 || attr_info->read_classrepr == NULL || attr_info->last_classrepr == NULL
      || attr_info->read_classrepr->id >= attr_info->last_classrepr->id)
    {
      error = ER_UNEXPECTED;
      er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error, 1, "Incorrect attribute information.");
      goto exit;
    }

  /* the class has XCH_M_LOCK */
  error =
    locator_attribute_info_force (thread_p, &upd_scancache->node.hfid, oid, attr_info, atts_id, updated_n_attrs_id,
				  LC_FLUSH_UPDATE, SINGLE_ROW_UPDATE, upd_scancache, &force_count, false,
  /* NOTE(review): the remaining arguments and the closing of this call are
   * elided in this extract — verify against the repository. */
  if (error != NO_ERROR)
    {
      {
	/* NOTE(review): the condition turning this error into success is
	 * elided in this extract. */
	  error = NO_ERROR;
      }

      goto exit;
    }

exit:
  pr_clear_value (&orig_value);
  return error;
}
17180 
17181 /*
17182  * heap_eval_function_index - evaluate the result of the expression used in
17183  * a function index.
17184  *
17185  * thread_p(in) : thread context
17186  * func_index_info(in): function index information
17187  * n_atts(in): number of attributes involved
17188  * att_ids(in): attribute identifiers
17189  * attr_info(in): attribute info structure
17190  * recdes(in): record descriptor
17191  * btid_index(in): id of the function index used
17192  * func_pred_cache(in): cached function index expressions
17193  * result(out): result of the function expression
17194  * fi_domain(out): domain of function index (from regu_var)
17195  * return: error code
17196  */
static int
heap_eval_function_index (THREAD_ENTRY * thread_p, FUNCTION_INDEX_INFO * func_index_info, int n_atts, int *att_ids,
			  HEAP_CACHE_ATTRINFO * attr_info, RECDES * recdes, int btid_index, DB_VALUE * result,
			  FUNC_PRED_UNPACK_INFO * func_pred_cache, TP_DOMAIN ** fi_domain)
{
  int error = NO_ERROR;
  OR_INDEX *index = NULL;	/* index entry of the function index */
  char *expr_stream = NULL;	/* packed expression to unpack when not cached */
  int expr_stream_size = 0;
  /* NOTE(review): the declaration of func_pred (presumably
   * "FUNC_PRED *func_pred = NULL;") is elided in this extract — verify. */
  void *unpack_info = NULL;	/* non-NULL when we unpacked the stream locally */
  DB_VALUE *res = NULL;
  int i, nr_atts;
  ATTR_ID *atts = NULL;
  bool atts_free = false, attrinfo_clear = false, attrinfo_end = false;
  HEAP_CACHE_ATTRINFO *cache_attr_info = NULL;

  /* Two call patterns: (func_index_info == NULL, btid_index >= 0,
   * n_atts == -1) is the insert path driven by the class representation;
   * otherwise the load-index path supplies the expression directly. */
  if (func_index_info == NULL && btid_index > -1 && n_atts == -1)
    {
      index = &(attr_info->last_classrepr->indexes[btid_index]);
      if (func_pred_cache)
	{
	  /* reuse the pre-unpacked predicate from the cache */
	  func_pred = (FUNC_PRED *) func_pred_cache->func_pred;
	  cache_attr_info = func_pred->cache_attrinfo;
	  nr_atts = index->n_atts;
	}
      else
	{
	  /* no cache: collect the attribute ids so the stream can be
	   * unpacked and the attributes read below */
	  expr_stream = index->func_index_info->expr_stream;
	  expr_stream_size = index->func_index_info->expr_stream_size;
	  nr_atts = index->n_atts;
	  atts = (ATTR_ID *) malloc (nr_atts * sizeof (ATTR_ID));
	  if (atts == NULL)
	    {
	      /* NOTE(review): the out-of-memory er_set call is elided here
	       * in this extract. */
	      error = ER_FAILED;
	      goto end;
	    }
	  atts_free = true;
	  for (i = 0; i < nr_atts; i++)
	    {
	      atts[i] = index->atts[i]->id;
	    }
	  cache_attr_info = attr_info;
	}
    }
  else
    {
      /* load index case */
      expr_stream = func_index_info->expr_stream;
      expr_stream_size = func_index_info->expr_stream_size;
      nr_atts = n_atts;
      atts = att_ids;
      cache_attr_info = ((FUNC_PRED *) func_index_info->expr)->cache_attrinfo;
      func_pred = (FUNC_PRED *) func_index_info->expr;
    }

  if (func_index_info == NULL)
    {
      /* insert case, read the values */
      if (func_pred == NULL)
	{
	  /* unpack the expression stream into a function predicate */
	  if (stx_map_stream_to_func_pred (thread_p, (FUNC_PRED **) (&func_pred), expr_stream, expr_stream_size,
					   &unpack_info))
	    {
	      error = ER_FAILED;
	      goto end;
	    }
	  cache_attr_info = func_pred->cache_attrinfo;

	  if (heap_attrinfo_start (thread_p, &attr_info->class_oid, nr_atts, atts, cache_attr_info) != NO_ERROR)
	    {
	      error = ER_FAILED;
	      goto end;
	    }
	  attrinfo_end = true;
	}

      if (heap_attrinfo_read_dbvalues (thread_p, &attr_info->inst_oid, recdes, NULL, cache_attr_info) != NO_ERROR)
	{
	  error = ER_FAILED;
	  goto end;
	}
      attrinfo_clear = true;
    }

  /* evaluate the function expression over the attribute values */
  error =
    fetch_peek_dbval (thread_p, func_pred->func_regu, NULL, &cache_attr_info->class_oid, &cache_attr_info->inst_oid,
		      NULL, &res);
  if (error == NO_ERROR)
    {
      pr_clone_value (res, result);
    }

  if (fi_domain != NULL)
    {
      /* cache and hand back the domain of the function expression */
      *fi_domain = tp_domain_cache (func_pred->func_regu->domain);
    }

  if (res != NULL && res->need_clear == true)
    {
      pr_clear_value (res);
    }

end:
  /* tear down only what this invocation set up, in reverse order */
  if (attrinfo_clear && cache_attr_info)
    {
      heap_attrinfo_clear_dbvalues (cache_attr_info);
    }
  if (attrinfo_end && cache_attr_info)
    {
      heap_attrinfo_end (thread_p, cache_attr_info);
    }
  if (atts_free && atts)
    {
      free_and_init (atts);
    }
  if (unpack_info)
    {
      (void) qexec_clear_func_pred (thread_p, func_pred);
      stx_free_additional_buff (thread_p, unpack_info);
      stx_free_xasl_unpack_info (unpack_info);
      db_private_free_and_init (thread_p, unpack_info);
    }

  return error;
}
17324 
17325 /*
17326  * heap_init_func_pred_unpack_info () - if function indexes are found,
17327  * each function expression is unpacked and cached
17328  * in order to be used during bulk inserts
17329  * (insert ... select).
17330  * return: NO_ERROR, or ER_FAILED
17331  * thread_p(in): thread entry
17332  * attr_info(in): heap_cache_attrinfo
17333  * class_oid(in): the class oid
17334  * func_indx_preds(out):
17335  */
int
heap_init_func_pred_unpack_info (THREAD_ENTRY * thread_p, HEAP_CACHE_ATTRINFO * attr_info, const OID * class_oid,
				 FUNC_PRED_UNPACK_INFO ** func_indx_preds)
{
  OR_FUNCTION_INDEX *fi_info = NULL;
  int n_indexes;
  int i, j;
  int *att_ids = NULL;		/* per-index attribute id scratch array */
  int error_status = NO_ERROR;
  OR_INDEX *idx;
  FUNC_PRED_UNPACK_INFO *fi_preds = NULL;	/* result array, lazily allocated */
  int *attr_info_started = NULL;	/* tracks which cache_attrinfo were started */
  size_t size;

  if (attr_info == NULL || class_oid == NULL || func_indx_preds == NULL)
    {
      return ER_FAILED;
    }

  *func_indx_preds = NULL;

  /* Walk every index of the latest representation; for each function index,
   * unpack its expression and start its attribute cache so bulk inserts can
   * evaluate it repeatedly without re-unpacking. */
  n_indexes = attr_info->last_classrepr->n_indexes;
  for (i = 0; i < n_indexes; i++)
    {
      idx = &(attr_info->last_classrepr->indexes[i]);
      fi_info = idx->func_index_info;
      if (fi_info)
	{
	  if (fi_preds == NULL)
	    {
	      /* first function index found: allocate the full-size arrays
	       * (indexed by index position, not by function-index count) */
	      size = n_indexes * sizeof (FUNC_PRED_UNPACK_INFO);
	      fi_preds = (FUNC_PRED_UNPACK_INFO *) db_private_alloc (thread_p, size);
	      if (!fi_preds)
		{
		  /* NOTE(review): the out-of-memory er_set call is elided
		   * here in this extract. */
		  error_status = ER_FAILED;
		  goto error;
		}
	      for (j = 0; j < n_indexes; j++)
		{
		  fi_preds[j].func_pred = NULL;
		  fi_preds[j].unpack_info = NULL;
		}

	      size = n_indexes * sizeof (int);
	      attr_info_started = (int *) db_private_alloc (thread_p, size);
	      if (attr_info_started == NULL)
		{
		  /* NOTE(review): the out-of-memory er_set call is elided
		   * here in this extract. */
		  error_status = ER_FAILED;
		  goto error;
		}
	      for (j = 0; j < n_indexes; j++)
		{
		  attr_info_started[j] = 0;
		}
	    }

	  /* unpack the function expression for this index */
	  if (stx_map_stream_to_func_pred (thread_p, (FUNC_PRED **) (&(fi_preds[i].func_pred)),
					   fi_info->expr_stream, fi_info->expr_stream_size, &(fi_preds[i].unpack_info)))
	    {
	      error_status = ER_FAILED;
	      goto error;
	    }

	  size = idx->n_atts * sizeof (ATTR_ID);
	  att_ids = (ATTR_ID *) db_private_alloc (thread_p, size);
	  if (!att_ids)
	    {
	      /* NOTE(review): the out-of-memory er_set call is elided here
	       * in this extract. */
	      error_status = ER_FAILED;
	      goto error;
	    }

	  for (j = 0; j < idx->n_atts; j++)
	    {
	      att_ids[j] = idx->atts[j]->id;
	    }

	  /* start the attribute cache of the unpacked predicate */
	  if (heap_attrinfo_start (thread_p, class_oid, idx->n_atts, att_ids,
				   ((FUNC_PRED *) fi_preds[i].func_pred)->cache_attrinfo) != NO_ERROR)
	    {
	      error_status = ER_FAILED;
	      goto error;
	    }

	  attr_info_started[i] = 1;

	  if (att_ids)
	    {
	      db_private_free_and_init (thread_p, att_ids);
	    }
	}
    }

  if (attr_info_started != NULL)
    {
      db_private_free_and_init (thread_p, attr_info_started);
    }

  /* may be NULL when the class has no function index at all */
  *func_indx_preds = fi_preds;

  return NO_ERROR;

error:
  if (att_ids)
    {
      db_private_free_and_init (thread_p, att_ids);
    }
  /* releases predicates, unpack buffers, and any started attribute caches */
  heap_free_func_pred_unpack_info (thread_p, n_indexes, fi_preds, attr_info_started);
  if (attr_info_started != NULL)
    {
      db_private_free_and_init (thread_p, attr_info_started);
    }

  return error_status;
}
17453 
17454 /*
17455  * heap_free_func_pred_unpack_info () -
17456  * return:
17457  * thread_p(in): thread entry
17458  * n_indexes(in): number of indexes
17459  * func_indx_preds(in):
17460  * attr_info_started(in): array of int (1 if corresponding cache_attrinfo
17461  * must be cleaned, 0 otherwise)
17462  * if null all cache_attrinfo must be cleaned
17463  */
17464 void
17465 heap_free_func_pred_unpack_info (THREAD_ENTRY * thread_p, int n_indexes, FUNC_PRED_UNPACK_INFO * func_indx_preds,
17466  int *attr_info_started)
17467 {
17468  int i;
17469 
17470  if (func_indx_preds == NULL)
17471  {
17472  return;
17473  }
17474 
17475  for (i = 0; i < n_indexes; i++)
17476  {
17477  if (func_indx_preds[i].func_pred)
17478  {
17479  if (attr_info_started == NULL || attr_info_started[i])
17480  {
17481  assert (((FUNC_PRED *) func_indx_preds[i].func_pred)->cache_attrinfo);
17482  (void) heap_attrinfo_end (thread_p, ((FUNC_PRED *) func_indx_preds[i].func_pred)->cache_attrinfo);
17483  }
17484  (void) qexec_clear_func_pred (thread_p, (FUNC_PRED *) func_indx_preds[i].func_pred);
17485  }
17486 
17487  if (func_indx_preds[i].unpack_info)
17488  {
17489  stx_free_additional_buff (thread_p, func_indx_preds[i].unpack_info);
17490  stx_free_xasl_unpack_info (func_indx_preds[i].unpack_info);
17491  db_private_free_and_init (thread_p, func_indx_preds[i].unpack_info);
17492  }
17493  }
17494  db_private_free_and_init (thread_p, func_indx_preds);
17495 }
17496 
17497 /*
17498  * heap_header_capacity_start_scan () - start scan function for 'show heap ...'
17499  * return: NO_ERROR, or ER_code
17500  * thread_p(in): thread entry
17501  * show_type(in):
17502  * arg_values(in):
17503  * arg_cnt(in):
17504  * ptr(in/out): 'show heap' context
17505  */
int
heap_header_capacity_start_scan (THREAD_ENTRY * thread_p, int show_type, DB_VALUE ** arg_values, int arg_cnt,
				 void **ptr)
{
  int error = NO_ERROR;
  char *class_name = NULL;
  /* NOTE(review): the declarations of partition_type and status (used
   * below) are elided around here in this extract — verify. */
  OID class_oid
  HEAP_SHOW_SCAN_CTX *ctx = NULL;	/* scan context handed back via *ptr */
  OR_PARTITION *parts = NULL;
  int i = 0;
  int parts_count = 0;
  bool is_all = false;		/* true for the "show all ..." variants */

  /* arguments: [0] class name string, [1] partition type integer */
  assert (arg_cnt == 2);
  assert (DB_VALUE_TYPE (arg_values[0]) == DB_TYPE_CHAR);
  assert (DB_VALUE_TYPE (arg_values[1]) == DB_TYPE_INTEGER);

  *ptr = NULL;

  class_name = db_get_string (arg_values[0]);

  partition_type = (DB_CLASS_PARTITION_TYPE) db_get_int (arg_values[1]);

  ctx = (HEAP_SHOW_SCAN_CTX *) db_private_alloc (thread_p, sizeof (HEAP_SHOW_SCAN_CTX));
  if (ctx == NULL)
    {
      ASSERT_ERROR ();
      error = er_errid ();
      goto cleanup;
    }
  memset (ctx, 0, sizeof (HEAP_SHOW_SCAN_CTX));

  status = xlocator_find_class_oid (thread_p, class_name, &class_oid, S_LOCK);
  /* NOTE(review): the status check and error-code assignment guarding this
   * block are elided in this extract — verify against the repository. */
    {
      er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, error, 1, class_name);
      goto cleanup;
    }

  is_all = (show_type == SHOWSTMT_ALL_HEAP_HEADER || show_type == SHOWSTMT_ALL_HEAP_CAPACITY);

  if (is_all && partition_type == DB_PARTITIONED_CLASS)
    {
      /* partitioned class: collect one HFID per partition */
      error = heap_get_class_partitions (thread_p, &class_oid, &parts, &parts_count);
      if (error != NO_ERROR)
	{
	  goto cleanup;
	}

      ctx->hfids = (HFID *) db_private_alloc (thread_p, parts_count * sizeof (HFID));
      if (ctx->hfids == NULL)
	{
	  ASSERT_ERROR ();
	  error = er_errid ();
	  goto cleanup;
	}

      for (i = 0; i < parts_count; i++)
	{
	  HFID_COPY (&ctx->hfids[i], &parts[i].class_hfid);
	}

      ctx->hfids_count = parts_count;
    }
  else
    {
      /* plain class: a single HFID */
      ctx->hfids = (HFID *) db_private_alloc (thread_p, sizeof (HFID));
      if (ctx->hfids == NULL)
	{
	  ASSERT_ERROR ();
	  error = er_errid ();
	  goto cleanup;
	}

      error = heap_get_hfid_from_class_oid (thread_p, &class_oid, &ctx->hfids[0]);
      if (error != NO_ERROR)
	{
	  goto cleanup;
	}

      ctx->hfids_count = 1;
    }

  /* success: transfer ownership of ctx to the caller */
  *ptr = ctx;
  ctx = NULL;

cleanup:

  if (parts != NULL)
    {
      heap_clear_partition_info (thread_p, parts, parts_count);
    }

  /* ctx is only non-NULL here on the error path; free what was built */
  if (ctx != NULL)
    {
      if (ctx->hfids != NULL)
	{
	  db_private_free (thread_p, ctx->hfids);
	}

      db_private_free_and_init (thread_p, ctx);
    }

  return error;
}
17614 
17615 /*
17616  * heap_header_next_scan () - next scan function for
17617  * 'show (all) heap header'
17618  * return: NO_ERROR, or ER_code
17619  * thread_p(in):
17620  * cursor(in):
17621  * out_values(in/out):
17622  * out_cnt(in):
17623  * ptr(in): 'show heap' context
17624  */
SCAN_CODE
heap_header_next_scan (THREAD_ENTRY * thread_p, int cursor, DB_VALUE ** out_values, int out_cnt, void *ptr)
{
  /* Produces one 'show heap header' result row per heap file collected in the scan context.
   * NOTE(review): a few error-setting lines appear to be elided in this copy of the file
   * (after spage_get_record / heap_get_class_name failures) — compare against upstream. */
  int error = NO_ERROR;
  HEAP_SHOW_SCAN_CTX *ctx = NULL;
  VPID vpid;
  HEAP_HDR_STATS *heap_hdr = NULL;
  RECDES hdr_recdes;
  int i = 0;
  int idx = 0;			/* index of the next output DB_VALUE to fill */
  PAGE_PTR pgptr = NULL;
  HFID *hfid_p;
  char *class_name = NULL;
  int avg_length = 0;
  char buf[512] = { 0 };	/* scratch area for string-valued columns */
  char temp[64] = { 0 };	/* scratch area for a single list element */
  char *buf_p, *end;

  ctx = (HEAP_SHOW_SCAN_CTX *) ptr;

  /* one output row per heap file gathered at start-scan time */
  if (cursor >= ctx->hfids_count)
    {
      return S_END;
    }

  hfid_p = &ctx->hfids[cursor];

  /* the heap header record lives in the heap's first page */
  vpid.volid = hfid_p->vfid.volid;
  vpid.pageid = hfid_p->hpgid;

  pgptr = heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, S_LOCK, NULL, NULL);
  if (pgptr == NULL)
    {
      ASSERT_ERROR ();
      error = er_errid ();
      goto cleanup;
    }

  /* peek the header/chain slot; the page stays fixed until cleanup */
  if (spage_get_record (thread_p, pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &hdr_recdes, PEEK) != S_SUCCESS)
    {
      goto cleanup;
    }

  heap_hdr = (HEAP_HDR_STATS *) hdr_recdes.data;

  if (heap_get_class_name (thread_p, &(heap_hdr->class_oid), &class_name) != NO_ERROR || class_name == NULL)
    {
      goto cleanup;
    }

  idx = 0;

  /* Class_name */
  error = db_make_string_copy (out_values[idx], class_name);
  idx++;
  if (error != NO_ERROR)
    {
      goto cleanup;
    }

  /* Class_oid */
  oid_to_string (buf, sizeof (buf), &heap_hdr->class_oid);
  error = db_make_string_copy (out_values[idx], buf);
  idx++;
  if (error != NO_ERROR)
    {
      goto cleanup;
    }

  /* HFID: volume id, file id, header page id */
  db_make_int (out_values[idx], hfid_p->vfid.volid);
  idx++;

  db_make_int (out_values[idx], hfid_p->vfid.fileid);
  idx++;

  db_make_int (out_values[idx], hfid_p->hpgid);
  idx++;

  /* Overflow_vfid */
  vfid_to_string (buf, sizeof (buf), &heap_hdr->ovf_vfid);
  error = db_make_string_copy (out_values[idx], buf);
  idx++;
  if (error != NO_ERROR)
    {
      goto cleanup;
    }

  /* Next_vpid */
  vpid_to_string (buf, sizeof (buf), &heap_hdr->next_vpid);
  error = db_make_string_copy (out_values[idx], buf);
  idx++;
  if (error != NO_ERROR)
    {
      goto cleanup;
    }

  /* Unfill space */
  db_make_int (out_values[idx], heap_hdr->unfill_space);
  idx++;

  /* Estimated number of pages and records */
  db_make_bigint (out_values[idx], heap_hdr->estimates.num_pages);
  idx++;

  db_make_bigint (out_values[idx], heap_hdr->estimates.num_recs);
  idx++;

  /* average record length from the running totals, rounded up (+0.9) */
  avg_length = ((heap_hdr->estimates.num_recs > 0)
		? (int) ((heap_hdr->estimates.recs_sumlen / (float) heap_hdr->estimates.num_recs) + 0.9) : 0);
  db_make_int (out_values[idx], avg_length);
  idx++;

  db_make_int (out_values[idx], heap_hdr->estimates.num_high_best);
  idx++;

  db_make_int (out_values[idx], heap_hdr->estimates.num_other_high_best);
  idx++;

  db_make_int (out_values[idx], heap_hdr->estimates.head);
  idx++;

  /* Estimates_best_list: comma-separated "((vol|page), freespace)" entries */
  buf_p = buf;
  end = buf + sizeof (buf);
  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
    {
      if (i > 0)
	{
	  if (fill_string_to_buffer (&buf_p, end, ", ") == -1)
	    {
	      break;		/* buffer exhausted; emit whatever fits */
	    }
	}

      heap_bestspace_to_string (temp, sizeof (temp), heap_hdr->estimates.best + i);
      if (fill_string_to_buffer (&buf_p, end, temp) == -1)
	{
	  break;
	}
    }

  error = db_make_string_copy (out_values[idx], buf);
  idx++;
  if (error != NO_ERROR)
    {
      goto cleanup;
    }

  db_make_int (out_values[idx], heap_hdr->estimates.num_second_best);
  idx++;

  db_make_int (out_values[idx], heap_hdr->estimates.head_second_best);
  idx++;

  db_make_int (out_values[idx], heap_hdr->estimates.tail_second_best);
  idx++;

  db_make_int (out_values[idx], heap_hdr->estimates.num_substitutions);
  idx++;

  /* Estimates_second_best: comma-separated VPID list */
  buf_p = buf;
  end = buf + sizeof (buf);
  for (i = 0; i < HEAP_NUM_BEST_SPACESTATS; i++)
    {
      if (i > 0)
	{
	  if (fill_string_to_buffer (&buf_p, end, ", ") == -1)
	    {
	      break;
	    }
	}

      vpid_to_string (temp, sizeof (temp), heap_hdr->estimates.second_best + i);
      if (fill_string_to_buffer (&buf_p, end, temp) == -1)
	{
	  break;
	}
    }

  error = db_make_string_copy (out_values[idx], buf);
  idx++;
  if (error != NO_ERROR)
    {
      goto cleanup;
    }

  /* Estimates last_vpid */
  vpid_to_string (buf, sizeof (buf), &heap_hdr->estimates.last_vpid);
  error = db_make_string_copy (out_values[idx], buf);
  idx++;
  if (error != NO_ERROR)
    {
      goto cleanup;
    }

  /* Estimates full_search_vpid */
  vpid_to_string (buf, sizeof (buf), &heap_hdr->estimates.full_search_vpid);
  error = db_make_string_copy (out_values[idx], buf);
  idx++;
  if (error != NO_ERROR)
    {
      goto cleanup;
    }

  /* every declared output column must have been produced */
  assert (idx == out_cnt);

cleanup:

  if (pgptr != NULL)
    {
      pgbuf_unfix_and_init (thread_p, pgptr);
    }

  if (class_name != NULL)
    {
      free_and_init (class_name);
    }

  return (error == NO_ERROR) ? S_SUCCESS : S_ERROR;
}
17848 
17849 /*
17850  * heap_capacity_next_scan () - next scan function for
17851  * 'show (all) heap capacity'
17852  * return: NO_ERROR, or ER_code
17853  * thread_p(in):
17854  * cursor(in):
17855  * out_values(in/out):
17856  * out_cnt(in):
17857  * ptr(in): 'show heap' context
17858  */
SCAN_CODE
heap_capacity_next_scan (THREAD_ENTRY * thread_p, int cursor, DB_VALUE ** out_values, int out_cnt, void *ptr)
{
  /* Produces one 'show heap capacity' row per heap file in the scan context.
   * NOTE(review): the er_set calls in the repr == NULL and heap_get_class_name failure
   * branches appear to be partially elided in this copy — compare against upstream. */
  int error = NO_ERROR;
  HEAP_SHOW_SCAN_CTX *ctx = NULL;
  HFID *hfid_p = NULL;
  HEAP_CACHE_ATTRINFO attr_info;
  OR_CLASSREP *repr = NULL;	/* last (current) class representation */
  char *classname = NULL;
  char class_oid_str[64] = { 0 };
  bool is_heap_attrinfo_started = false;	/* guards heap_attrinfo_end in cleanup */
  INT64 num_recs = 0;
  INT64 num_relocated_recs = 0;
  INT64 num_overflowed_recs = 0;
  INT64 num_pages = 0;
  int avg_rec_len = 0;
  int avg_free_space_per_page = 0;
  int avg_free_space_without_last_page = 0;
  int avg_overhead_per_page = 0;
  int val = 0;
  int idx = 0;			/* index of the next output DB_VALUE to fill */
  FILE_DESCRIPTORS fdes;

  ctx = (HEAP_SHOW_SCAN_CTX *) ptr;

  if (cursor >= ctx->hfids_count)
    {
      return S_END;
    }

  hfid_p = &ctx->hfids[cursor];

  /* walk the whole heap file to gather the capacity statistics */
  error =
    heap_get_capacity (thread_p, hfid_p, &num_recs, &num_relocated_recs, &num_overflowed_recs, &num_pages,
		       &avg_free_space_per_page, &avg_free_space_without_last_page, &avg_rec_len,
		       &avg_overhead_per_page);
  if (error != NO_ERROR)
    {
      goto cleanup;
    }

  /* the file descriptor carries the owning class OID of this heap */
  error = file_descriptor_get (thread_p, &hfid_p->vfid, &fdes);
  if (error != NO_ERROR)
    {
      ASSERT_ERROR ();
      goto cleanup;
    }

  error = heap_attrinfo_start (thread_p, &fdes.heap.class_oid, -1, NULL, &attr_info);
  if (error != NO_ERROR)
    {
      goto cleanup;
    }

  is_heap_attrinfo_started = true;

  repr = attr_info.last_classrepr;
  if (repr == NULL)
    {
		    fdes.heap.class_oid.slotid);
      goto cleanup;
    }

  if (heap_get_class_name (thread_p, &fdes.heap.class_oid, &classname) != NO_ERROR || classname == NULL)
    {
      goto cleanup;
    }

  idx = 0;

  /* Class_name */
  error = db_make_string_copy (out_values[idx], classname);
  idx++;
  if (error != NO_ERROR)
    {
      goto cleanup;
    }

  /* Class_oid */
  oid_to_string (class_oid_str, sizeof (class_oid_str), &fdes.heap.class_oid);
  error = db_make_string_copy (out_values[idx], class_oid_str);
  idx++;
  if (error != NO_ERROR)
    {
      goto cleanup;
    }

  /* HFID: volume id, file id, header page id */
  db_make_int (out_values[idx], hfid_p->vfid.volid);
  idx++;

  db_make_int (out_values[idx], hfid_p->vfid.fileid);
  idx++;

  db_make_int (out_values[idx], hfid_p->hpgid);
  idx++;

  /* record counts from heap_get_capacity */
  db_make_bigint (out_values[idx], num_recs);
  idx++;

  db_make_bigint (out_values[idx], num_relocated_recs);
  idx++;

  db_make_bigint (out_values[idx], num_overflowed_recs);
  idx++;

  db_make_bigint (out_values[idx], num_pages);
  idx++;

  db_make_int (out_values[idx], avg_rec_len);
  idx++;

  db_make_int (out_values[idx], avg_free_space_per_page);
  idx++;

  db_make_int (out_values[idx], avg_free_space_without_last_page);
  idx++;

  db_make_int (out_values[idx], avg_overhead_per_page);
  idx++;

  /* schema-related columns from the current class representation */
  db_make_int (out_values[idx], repr->id);
  idx++;

  db_make_int (out_values[idx], repr->n_attributes);
  idx++;

  /* fixed-width attribute count = total minus variable/shared/class attributes */
  val = repr->n_attributes - repr->n_variable - repr->n_shared_attrs - repr->n_class_attrs;
  db_make_int (out_values[idx], val);
  idx++;

  db_make_int (out_values[idx], repr->n_variable);
  idx++;

  db_make_int (out_values[idx], repr->n_shared_attrs);
  idx++;

  db_make_int (out_values[idx], repr->n_class_attrs);
  idx++;

  db_make_int (out_values[idx], repr->fixed_length);
  idx++;

  /* every declared output column must have been produced */
  assert (idx == out_cnt);

cleanup:

  if (classname != NULL)
    {
      free_and_init (classname);
    }

  if (is_heap_attrinfo_started)
    {
      heap_attrinfo_end (thread_p, &attr_info);
    }

  return (error == NO_ERROR) ? S_SUCCESS : S_ERROR;
}
18018 
18019 /*
18020  * heap_header_capacity_end_scan() - end scan function of
18021  * 'show (all) heap ...'
18022  * return: NO_ERROR, or ER_code
18023  * thread_p(in):
18024  * ptr(in/out): 'show heap' context
18025  */
18026 int
18028 {
18029  HEAP_SHOW_SCAN_CTX *ctx;
18030 
18031  ctx = (HEAP_SHOW_SCAN_CTX *) (*ptr);
18032 
18033  if (ctx == NULL)
18034  {
18035  return NO_ERROR;
18036  }
18037 
18038  if (ctx->hfids != NULL)
18039  {
18040  db_private_free (thread_p, ctx->hfids);
18041  }
18042 
18043  db_private_free (thread_p, ctx);
18044  *ptr = NULL;
18045 
18046  return NO_ERROR;
18047 }
18048 
18049 static char *
18050 heap_bestspace_to_string (char *buf, int buf_size, const HEAP_BESTSPACE * hb)
18051 {
18052  snprintf (buf, buf_size, "((%d|%d), %d)", hb->vpid.volid, hb->vpid.pageid, hb->freespace);
18053  buf[buf_size - 1] = '\0';
18054 
18055  return buf;
18056 }
18057 
18058 /*
18059  * fill_string_to_buffer () - fill string into buffer
18060  *
18061  * -----------------------------
18062  * | buffer |
18063  * -----------------------------
18064  * ^ ^
18065  * | |
18066  * start end
18067  *
18068  * return: the count of characters (not include '\0') which has been
18069  * filled into buffer; -1 means error.
18070  * start(in/out): After filling, start move to the '\0' position.
18071  * end(in): The first unavailble position.
18072  * str(in):
18073  */
static int
fill_string_to_buffer (char **start, char *end, const char *str)
{
  /* Append str at *start, keeping the result NUL-terminated, and advance
   * *start to the new terminator.  end is the first unavailable position.
   * Returns the number of characters copied (excluding the NUL), or -1 when
   * str plus its terminator does not fit. */
  size_t needed = strlen (str);

  /* require room for the string and its terminating NUL */
  if ((size_t) (end - *start) <= needed)
    {
      return -1;
    }

  memcpy (*start, str, needed);
  *start += needed;
  **start = '\0';

  return (int) needed;
}
18090 
18091 /*
18092  * heap_get_page_info () - Obtain page information.
18093  *
18094  * return : SCAN_CODE.
18095  * thread_p (in) : Thread entry.
18096  * cls_oid (in) : Class object identifier.
18097  * hfid (in) : Heap file identifier.
18098  * vpid (in) : Page identifier.
18099  * pgptr (in) : Pointer to the cached page.
18100  * page_info (in) : Pointers to DB_VALUES where page information is stored.
18101  */
static SCAN_CODE
heap_get_page_info (THREAD_ENTRY * thread_p, const OID * cls_oid, const HFID * hfid, const VPID * vpid,
		    const PAGE_PTR pgptr, DB_VALUE ** page_info)
{
  /* Fills the page_info DB_VALUE array for one heap page: class OID, chain
   * links from the header/chain record, plus spage header statistics.
   * NOTE(review): the db_make_* calls that publish the chain's prev/next page
   * ids appear partially elided in this copy (the else branch only declares
   * 'chain') — compare against upstream before relying on this listing. */
  RECDES recdes;

  if (page_info == NULL)
    {
      /* no need to get page info */
      return S_SUCCESS;
    }

  /* every heap page stores its header/chain record in the same fixed slot */
  if (spage_get_record (thread_p, pgptr, HEAP_HEADER_AND_CHAIN_SLOTID, &recdes, PEEK) != S_SUCCESS)
    {
      /* Error obtaining header slot */
      return S_ERROR;
    }

  db_make_oid (page_info[HEAP_PAGE_INFO_CLASS_OID], cls_oid);

  /* the heap's first page stores HEAP_HDR_STATS; all others store HEAP_CHAIN */
  if (hfid->hpgid == vpid->pageid && hfid->vfid.volid == vpid->volid)
    {
      HEAP_HDR_STATS *hdr_stats = (HEAP_HDR_STATS *) recdes.data;
      db_make_int (page_info[HEAP_PAGE_INFO_NEXT_PAGE], hdr_stats->next_vpid.pageid);
    }
  else
    {
      HEAP_CHAIN *chain = (HEAP_CHAIN *) recdes.data;
    }

  /* Obtain information from spage header */
  return spage_get_page_header_info (pgptr, page_info);
}
18138 
18139 /*
18140  * heap_page_next () - Advance to next page in chain and obtain information.
18141  *
18142  * return : SCAN_CODE.
18143  * thread_p (in) : Thread entry.
18144  * class_oid (in) : Class object identifier.
18145  * hfid (in) : Heap file identifier.
18146  * next_vpid (in) : Next page identifier.
18147  * cache_pageinfo (in) : Pointers to DB_VALUEs where page information is
18148  * stored.
18149  */
18150 SCAN_CODE
18151 heap_page_next (THREAD_ENTRY * thread_p, const OID * class_oid, const HFID * hfid, VPID * next_vpid,
18152  DB_VALUE ** cache_pageinfo)
18153 {
18154  PGBUF_WATCHER pg_watcher;
18155  PGBUF_WATCHER old_pg_watcher;
18156  SCAN_CODE scan = S_SUCCESS;
18157 
18160 
18161  /* get next page */
18162  if (VPID_ISNULL (next_vpid))
18163  {
18164  /* set to first page */
18165  next_vpid->pageid = hfid->hpgid;
18166  next_vpid->volid = hfid->vfid.volid;
18167  }
18168  else
18169  {
18170  pg_watcher.pgptr = heap_scan_pb_lock_and_fetch (thread_p, next_vpid, OLD_PAGE, S_LOCK, NULL, &pg_watcher);
18171  if (pg_watcher.pgptr == NULL)
18172  {
18173  return S_ERROR;
18174  }
18175  /* get next page */
18176  heap_vpid_next (thread_p, hfid, pg_watcher.pgptr, next_vpid);
18177  if (OID_ISNULL (next_vpid))
18178  {
18179  /* no more pages to scan */
18180  pgbuf_ordered_unfix (thread_p, &pg_watcher);
18181  return S_END;
18182  }
18183  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
18184  }
18185 
18186  /* get page pointer to next page */
18187  pg_watcher.pgptr =
18188  heap_scan_pb_lock_and_fetch (thread_p, next_vpid, OLD_PAGE_PREVENT_DEALLOC, S_LOCK, NULL, &pg_watcher);
18189  if (old_pg_watcher.pgptr != NULL)
18190  {
18191  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
18192  }
18193  if (pg_watcher.pgptr == NULL)
18194  {
18195  return S_ERROR;
18196  }
18197 
18198  /* read page information and return scan code */
18199  scan = heap_get_page_info (thread_p, class_oid, hfid, next_vpid, pg_watcher.pgptr, cache_pageinfo);
18200 
18201  pgbuf_ordered_unfix (thread_p, &pg_watcher);
18202  return scan;
18203 }
18204 
18205 /*
18206  * heap_page_prev () - Advance to previous page in chain and obtain
18207  * information.
18208  *
18209  * return : SCAN_CODE.
18210  * thread_p (in) : Thread entry.
18211  * class_oid (in) : Class object identifier.
18212  * hfid (in) : Heap file identifier.
18213  * prev_vpid (in) : Previous page identifier.
18214  * cache_pageinfo (in) : Pointers to DB_VALUEs where page information is
18215  * stored.
18216  */
18217 SCAN_CODE
18218 heap_page_prev (THREAD_ENTRY * thread_p, const OID * class_oid, const HFID * hfid, VPID * prev_vpid,
18219  DB_VALUE ** cache_pageinfo)
18220 {
18221  PGBUF_WATCHER pg_watcher;
18222  PGBUF_WATCHER old_pg_watcher;
18223  SCAN_CODE scan = S_SUCCESS;
18224 
18227 
18228  /* get next page */
18229  if (VPID_ISNULL (prev_vpid))
18230  {
18231  /* set to last page */
18232  if (heap_get_last_vpid (thread_p, hfid, prev_vpid) != NO_ERROR)
18233  {
18234  ASSERT_ERROR ();
18235  return S_ERROR;
18236  }
18237  }
18238  else
18239  {
18240  pg_watcher.pgptr = heap_scan_pb_lock_and_fetch (thread_p, prev_vpid, OLD_PAGE, S_LOCK, NULL, &pg_watcher);
18241  if (pg_watcher.pgptr == NULL)
18242  {
18243  return S_ERROR;
18244  }
18245  /* get next page */
18246  heap_vpid_prev (thread_p, hfid, pg_watcher.pgptr, prev_vpid);
18247  if (OID_ISNULL (prev_vpid))
18248  {
18249  /* no more pages to scan */
18250  return S_END;
18251  }
18252  /* get next page */
18253  pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
18254  }
18255 
18256  pg_watcher.pgptr =
18257  heap_scan_pb_lock_and_fetch (thread_p, prev_vpid, OLD_PAGE_PREVENT_DEALLOC, S_LOCK, NULL, &pg_watcher);
18258  if (old_pg_watcher.pgptr != NULL)
18259  {
18260  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
18261  }
18262  if (pg_watcher.pgptr == NULL)
18263  {
18264  pgbuf_ordered_unfix (thread_p, &pg_watcher);
18265  return S_ERROR;
18266  }
18267 
18268  /* read page information and return scan code */
18269  scan = heap_get_page_info (thread_p, class_oid, hfid, prev_vpid, pg_watcher.pgptr, cache_pageinfo);
18270 
18271  pgbuf_ordered_unfix (thread_p, &pg_watcher);
18272  return scan;
18273 }
18274 
18275 /*
18276  * heap_get_record_info () - Heap function to obtain record information and
18277  * record data.
18278  *
18279  * return : SCAN CODE (S_SUCCESS or S_ERROR).
18280  * thread_p (in) : Thread entry.
18281  * oid (in) : Object identifier.
18282  * recdes (out) : Record descriptor (to save record data).
18283  * forward_recdes (in) : Record descriptor used by REC_RELOCATION & REC_BIGONE
18284  * records.
18285  * pgptr (in/out) : Pointer to the page this object belongs to.
18286  * scan_cache (in) : Heap scan cache.
18287  * ispeeking (in) : PEEK/COPY.
18288  * record_info (out) : Stores record information.
18289  */
static SCAN_CODE
heap_get_record_info (THREAD_ENTRY * thread_p, const OID oid, RECDES * recdes, RECDES forward_recdes,
		      PGBUF_WATCHER * page_watcher, HEAP_SCANCACHE * scan_cache, bool ispeeking,
		      DB_VALUE ** record_info)
{
  /* Fills record_info (slot location/length/type, MVCC header fields) and the
   * record data for the slot identified by oid; the fixed page arrives via
   * page_watcher and is either handed to the scan cache or unfixed here.
   * NOTE(review): several lines of this function are missing from this copy
   * (the REPRID conditionals, the DELID else-branches, and the area-size
   * update before db_private_realloc) — compare against upstream. */
  SPAGE_SLOT *slot_p = NULL;
  SCAN_CODE scan = S_SUCCESS;
  OID forward_oid;
  MVCC_REC_HEADER mvcc_header;

  assert (page_watcher != NULL);
  assert (record_info != NULL);
  assert (recdes != NULL);

  /* careful adding values in the right order */
  db_make_int (record_info[HEAP_RECORD_INFO_T_VOLUMEID], oid.volid);
  db_make_int (record_info[HEAP_RECORD_INFO_T_PAGEID], oid.pageid);
  db_make_int (record_info[HEAP_RECORD_INFO_T_SLOTID], oid.slotid);

  /* get slot info */
  slot_p = spage_get_slot (page_watcher->pgptr, oid.slotid);
  if (slot_p == NULL)
    {
      assert (0);
    }
  db_make_int (record_info[HEAP_RECORD_INFO_T_OFFSET], slot_p->offset_to_record);
  db_make_int (record_info[HEAP_RECORD_INFO_T_LENGTH], slot_p->record_length);
  db_make_int (record_info[HEAP_RECORD_INFO_T_REC_TYPE], slot_p->record_type);

  /* get record info */
  switch (slot_p->record_type)
    {
    case REC_NEWHOME:
    case REC_HOME:
      /* record data lives entirely in this page */
      if (scan_cache != NULL && ispeeking == COPY && recdes->data == NULL)
	{
	  /* It is guaranteed that scan_cache is not NULL. */
	  if (scan_cache->area == NULL)
	    {
	      /* Allocate an area to hold the object. Assume that the object will fit in two pages for not better
	       * estimates. */
	      scan_cache->area_size = DB_PAGESIZE * 2;
	      scan_cache->area = (char *) db_private_alloc (thread_p, scan_cache->area_size);
	      if (scan_cache->area == NULL)
		{
		  scan_cache->area_size = -1;
		  pgbuf_ordered_unfix (thread_p, page_watcher);
		  return S_ERROR;
		}
	    }
	  recdes->data = scan_cache->area;
	  recdes->area_size = scan_cache->area_size;
	  /* The allocated space is enough to save the instance. */
	}
      if (scan_cache != NULL && scan_cache->cache_last_fix_page == true)
	{
	  /* keep the page fixed inside the scan cache for the next call */
	  scan = spage_get_record (thread_p, page_watcher->pgptr, oid.slotid, recdes, ispeeking);
	  pgbuf_replace_watcher (thread_p, page_watcher, &scan_cache->page_watcher);
	}
      else
	{
	  /* page will not stay fixed: must COPY the data out before unfixing */
	  scan = spage_get_record (thread_p, page_watcher->pgptr, oid.slotid, recdes, COPY);
	  pgbuf_ordered_unfix (thread_p, page_watcher);
	}
      or_mvcc_get_header (recdes, &mvcc_header);
      db_make_bigint (record_info[HEAP_RECORD_INFO_T_MVCC_INSID], MVCC_GET_INSID (&mvcc_header));
      if (MVCC_IS_HEADER_DELID_VALID (&mvcc_header))
	{
	  db_make_bigint (record_info[HEAP_RECORD_INFO_T_MVCC_DELID], MVCC_GET_DELID (&mvcc_header));
	}
      else
	{
	}
      db_make_int (record_info[HEAP_RECORD_INFO_T_CHN], OR_GET_MVCC_CHN (&mvcc_header));
      db_make_int (record_info[HEAP_RECORD_INFO_T_MVCC_FLAGS], MVCC_GET_FLAG (&mvcc_header));
      {
      }
      else
	{
	}
      break;

    case REC_BIGONE:
      /* Get the address of the content of the multiple page object */
      COPY_OID (&forward_oid, (OID *) forward_recdes.data);
      pgbuf_ordered_unfix (thread_p, page_watcher);

      /* Now get the content of the multiple page object. */
      /* Try to reuse the previously allocated area */
      if (scan_cache != NULL && (ispeeking == PEEK || recdes->data == NULL))
	{
	  /* It is guaranteed that scan_cache is not NULL. */
	  if (scan_cache->area == NULL)
	    {
	      /* Allocate an area to hold the object. Assume that the object will fit in two pages for not better
	       * estimates. We could call heap_ovf_get_length, but it may be better to just guess and realloc if
	       * needed. We could also check the estimates for average object length, but again, it may be expensive
	       * and may not be accurate for the object. */
	      scan_cache->area_size = DB_PAGESIZE * 2;
	      scan_cache->area = (char *) db_private_alloc (thread_p, scan_cache->area_size);
	      if (scan_cache->area == NULL)
		{
		  scan_cache->area_size = -1;
		  return S_ERROR;
		}
	    }
	  recdes->data = scan_cache->area;
	  recdes->area_size = scan_cache->area_size;

	  while ((scan = heap_ovf_get (thread_p, &forward_oid, recdes, NULL_CHN, NULL)) == S_DOESNT_FIT)
	    {
	      /* The object did not fit into such an area, reallocate a new area */
	      recdes->data = (char *) db_private_realloc (thread_p, scan_cache->area, recdes->area_size);
	      if (recdes->data == NULL)
		{
		  return S_ERROR;
		}
	      scan_cache->area_size = recdes->area_size;
	      scan_cache->area = recdes->data;
	    }
	  if (scan != S_SUCCESS)
	    {
	      recdes->data = NULL;
	    }
	}
      else
	{
	  scan = heap_ovf_get (thread_p, &forward_oid, recdes, NULL_CHN, NULL);
	}
      if (scan != S_SUCCESS)
	{
	  return S_ERROR;
	}

      or_mvcc_get_header (recdes, &mvcc_header);
      db_make_bigint (record_info[HEAP_RECORD_INFO_T_MVCC_INSID], MVCC_GET_INSID (&mvcc_header));
      if (MVCC_IS_HEADER_DELID_VALID (&mvcc_header))
	{
	  db_make_bigint (record_info[HEAP_RECORD_INFO_T_MVCC_DELID], MVCC_GET_DELID (&mvcc_header));
	}
      else
	{
	}
      db_make_int (record_info[HEAP_RECORD_INFO_T_CHN], OR_GET_MVCC_CHN (&mvcc_header));
      db_make_int (record_info[HEAP_RECORD_INFO_T_MVCC_FLAGS], MVCC_GET_FLAG (&mvcc_header));
      {
      }
      else
	{
	}
      break;
    case REC_RELOCATION:
    case REC_MARKDELETED:
    case REC_ASSIGN_ADDRESS:
    case REC_UNKNOWN:
    default:
      /* no record content to return for these slot types */
      db_make_null (record_info[HEAP_RECORD_INFO_T_REPRID]);
      db_make_null (record_info[HEAP_RECORD_INFO_T_CHN]);

      recdes->area_size = -1;
      recdes->data = NULL;
      if (scan_cache != NULL && scan_cache->cache_last_fix_page)
	{
	  assert (PGBUF_IS_CLEAN_WATCHER (&(scan_cache->page_watcher)));
	  if (page_watcher->pgptr != NULL)
	    {
	      pgbuf_replace_watcher (thread_p, page_watcher, &scan_cache->page_watcher);
	    }
	}
      else if (page_watcher->pgptr != NULL)
	{
	  pgbuf_ordered_unfix (thread_p, page_watcher);
	}
      break;
    }

  return scan;
}
18487 
18488 /*
18489  * heap_next () - Retrieve or peek next object
18490  * return: SCAN_CODE (Either of S_SUCCESS, S_DOESNT_FIT, S_END, S_ERROR)
18491  * hfid(in):
18492  * class_oid(in):
18493  * next_oid(in/out): Object identifier of current record.
18494  * Will be set to next available record or NULL_OID when
18495  * there is not one.
18496  * recdes(in/out): Pointer to a record descriptor. Will be modified to
18497  * describe the new record.
18498  * scan_cache(in/out): Scan cache or NULL
18499  * ispeeking(in): PEEK when the object is peeked, scan_cache cannot be NULL
18500  * COPY when the object is copied
18501  *
18502  */
18503 SCAN_CODE
18504 heap_next (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * next_oid, RECDES * recdes,
18505  HEAP_SCANCACHE * scan_cache, int ispeeking)
18506 {
18507  return heap_next_internal (thread_p, hfid, class_oid, next_oid, recdes, scan_cache, ispeeking, false, NULL);
18508 }
18509 
18510 /*
18511  * heap_next_record_info () - Retrieve or peek next object.
18512  *
18513  * return : SCAN_CODE.
18514  * thread_p (in) : Thread entry.
18515  * hfid (in) : Heap file identifier.
18516  * class_oid (in) : Class Object identifier.
18517  * next_oid (in/out) : Current object identifier. Will store the next
18518  * scanned object identifier.
18519  * recdes (in) : Record descriptor.
18520  * scan_cache (in) : Scan cache.
18521  * ispeeking (in) : PEEK/COPY.
18522  * cache_recordinfo (in/out) : DB_VALUE pointer array that caches record
18523  * information values.
18524  *
18525  * NOTE: This function is similar to heap next. The difference is that all
18526  * slots are scanned in their order in the heap file and along with
18527  * record data also information about that record is obtained.
18528  */
18529 SCAN_CODE
18530 heap_next_record_info (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * next_oid, RECDES * recdes,
18531  HEAP_SCANCACHE * scan_cache, int ispeeking, DB_VALUE ** cache_recordinfo)
18532 {
18533  return heap_next_internal (thread_p, hfid, class_oid, next_oid, recdes, scan_cache, ispeeking, false,
18534  cache_recordinfo);
18535 }
18536 
18537 /*
18538  * heap_prev () - Retrieve or peek next object
18539  * return: SCAN_CODE (Either of S_SUCCESS, S_DOESNT_FIT, S_END, S_ERROR)
18540  * hfid(in):
18541  * class_oid(in):
18542  * next_oid(in/out): Object identifier of current record.
18543  * Will be set to next available record or NULL_OID when
18544  * there is not one.
18545  * recdes(in/out): Pointer to a record descriptor. Will be modified to
18546  * describe the new record.
18547  * scan_cache(in/out): Scan cache or NULL
18548  * ispeeking(in): PEEK when the object is peeked, scan_cache cannot be NULL
18549  * COPY when the object is copied
18550  *
18551  */
18552 SCAN_CODE
18553 heap_prev (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * next_oid, RECDES * recdes,
18554  HEAP_SCANCACHE * scan_cache, int ispeeking)
18555 {
18556  return heap_next_internal (thread_p, hfid, class_oid, next_oid, recdes, scan_cache, ispeeking, true, NULL);
18557 }
18558 
18559 /*
18560  * heap_prev_record_info () - Retrieve or peek next object.
18561  *
18562  * return : SCAN_CODE.
18563  * thread_p (in) : Thread entry.
18564  * hfid (in) : Heap file identifier.
18565  * class_oid (in) : Class Object identifier.
18566  * prev_oid (in/out) : Current object identifier. Will store the
18567  * previous scanned object identifier.
18568  * recdes (in) : Record descriptor.
18569  * scan_cache (in) : Scan cache.
18570  * ispeeking (in) : PEEK/COPY.
18571  * cache_recordinfo (in/out) : DB_VALUE pointer array that caches record
18572  * information values
18573  *
18574  * NOTE: This function is similar to heap next. The difference is that all
18575  * slots are scanned in their order in the heap file and along with
18576  * record data also information about that record is obtained.
18577  */
18578 SCAN_CODE
18579 heap_prev_record_info (THREAD_ENTRY * thread_p, const HFID * hfid, OID * class_oid, OID * next_oid, RECDES * recdes,
18580  HEAP_SCANCACHE * scan_cache, int ispeeking, DB_VALUE ** cache_recordinfo)
18581 {
18582  return heap_next_internal (thread_p, hfid, class_oid, next_oid, recdes, scan_cache, ispeeking, true,
18583  cache_recordinfo);
18584 }
18585 
18586 /*
18587  * heap_scancache_start_chain_update () - start new scan cache for MVCC
18588  * chain update
18589  * return: error code
18590  * thread_p(in): thread entry
18591  * new_scan_cache(in/out): the new scan cache
18592  * old_scan_cache(in/out): the old scan cache
18593  * next_row_version(in): next row version
18594  *
18595  * Note: This function start a new scan cache used to update
18596  * MVCC chain. The old and new scan cache may share common data.
18597  */
18598 int
18599 heap_scancache_start_chain_update (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * new_scan_cache,
18600  HEAP_SCANCACHE * old_scan_cache, OID * next_row_version)
18601 {
18602  assert (new_scan_cache != NULL);
18603  assert (old_scan_cache != NULL);
18604 
18605  /* update scan cache */
18606 
18607  /* start a local cache that is suitable for our needs */
18608  if (heap_scancache_start (thread_p, new_scan_cache, &old_scan_cache->node.hfid, &old_scan_cache->node.class_oid,
18609  true, false, NULL) != NO_ERROR)
18610  {
18611  /* TODO - er_set */
18612  return ER_FAILED;
18613  }
18614 
18615  /* use current_scan_cache->area in order to alloc only once */
18616  new_scan_cache->area = old_scan_cache->area;
18617  new_scan_cache->area_size = old_scan_cache->area_size;
18618  /* set pgptr if is the case */
18619  if (old_scan_cache->page_watcher.pgptr != NULL)
18620  {
18621  /* handle this case, just to be sure */
18622  VPID *vpidptr_incache;
18623  VPID vpid;
18624 
18625  vpid.volid = next_row_version->volid;
18626  vpid.pageid = next_row_version->pageid;
18627  vpidptr_incache = pgbuf_get_vpid_ptr (old_scan_cache->page_watcher.pgptr);
18628  if (VPID_EQ (&vpid, vpidptr_incache))
18629  {
18630  pgbuf_replace_watcher (thread_p, &old_scan_cache->page_watcher, &new_scan_cache->page_watcher);
18631  }
18632  else
18633  {
18634  /* Free the previous scan page */
18635  pgbuf_ordered_unfix (thread_p, &old_scan_cache->page_watcher);
18636  }
18637  }
18638 
18639  return NO_ERROR;
18640 }
18641 
18642 /*
18643  * heap_get_mvcc_rec_header_from_overflow () - Get record header from overflow
18644  * page.
18645  *
18646  * return :
18647  * PAGE_PTR ovf_page (in) : overflow page pointer
18648  * MVCC_REC_HEADER * mvcc_header (in/out) : MVCC record header
18649  * recdes(in/out): if not NULL then receives first overflow page
18650  */
18651 int
18653 {
18654  RECDES ovf_recdes;
18655 
18656  assert (ovf_page != NULL);
18657  assert (mvcc_header != NULL);
18658 
18659  if (peek_recdes == NULL)
18660  {
18661  peek_recdes = &ovf_recdes;
18662  }
18663  peek_recdes->data = overflow_get_first_page_data (ovf_page);
18664  peek_recdes->length = OR_MVCC_MAX_HEADER_SIZE;
18665 
18666  return or_mvcc_get_header (peek_recdes, mvcc_header);
18667 }
18668 
18669 /*
18670  * heap_set_mvcc_rec_header_on_overflow () - Updates MVCC record header on
18671  * overflow page data.
18672  *
18673  * return : Void.
18674  * ovf_page (in) : First overflow page.
18675  * mvcc_header (in) : MVCC Record header.
18676  */
int
/* NOTE(review): the declarator line for heap_set_mvcc_rec_header_on_overflow
 * (PAGE_PTR ovf_page, MVCC_REC_HEADER * mvcc_header) is absent from this
 * extract -- verify against the repository source. */
{
  RECDES ovf_recdes;

  assert (ovf_page != NULL);
  assert (mvcc_header != NULL);

  /* build a record descriptor over the first overflow page's data */
  ovf_recdes.data = overflow_get_first_page_data (ovf_page);
  ovf_recdes.area_size = ovf_recdes.length = OR_HEADER_SIZE (ovf_recdes.data);
  /* Safe guard */
  assert (ovf_recdes.length == OR_MVCC_MAX_HEADER_SIZE);

  /* Make sure the header has maximum size for overflow records */
  if (!MVCC_IS_FLAG_SET (mvcc_header, OR_MVCC_FLAG_VALID_INSID))
    {
      /* Add MVCCID_ALL_VISIBLE for insert MVCCID */
      /* NOTE(review): a statement setting the INSID flag bit appears to be
       * missing here in this extract -- confirm against the repository source. */
      MVCC_SET_INSID (mvcc_header, MVCCID_ALL_VISIBLE);
    }

  if (!MVCC_IS_FLAG_SET (mvcc_header, OR_MVCC_FLAG_VALID_DELID))
    {
      /* Add MVCCID_NULL for delete MVCCID */
      /* NOTE(review): a statement setting the DELID flag bit appears to be
       * missing here in this extract -- confirm against the repository source. */
      MVCC_SET_DELID (mvcc_header, MVCCID_NULL);
    }

  /* Safe guard */
  return or_mvcc_set_header (&ovf_recdes, mvcc_header);
}
18709 
18710 /*
18711  * heap_get_bigone_content () - get content of a big record
18712  *
18713  * return : scan code.
18714  * thread_p (in) :
18715  * scan_cache (in) : Scan cache
18716  * ispeeking(in) : 0 if the content will be copied.
18717  * forward_oid(in) : content oid.
18718  * recdes(in/out) : record descriptor that will contain its content
18719  */
SCAN_CODE
heap_get_bigone_content (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, bool ispeeking, OID * forward_oid,
			 RECDES * recdes)
{
  SCAN_CODE scan = S_SUCCESS;

  /* Try to reuse the previously allocated area No need to check the snapshot since was already checked */
  if (scan_cache != NULL && (ispeeking == PEEK || recdes->data == NULL || recdes->data == scan_cache->area))
    {
      if (scan_cache->area == NULL)
	{
	  /*
	   * Allocate an area to hold the object. Assume that the object will fit in two pages for not better estimates.
	   * We could call heap_ovf_get_length, but it may be better to just guess and realloc if needed.
	   * We could also check the estimates for average object length, but again, it may be expensive and may not be
	   * accurate for this object.
	   */
	  scan_cache->area_size = DB_PAGESIZE * 2;
	  scan_cache->area = (char *) db_private_alloc (thread_p, scan_cache->area_size);
	  if (scan_cache->area == NULL)
	    {
	      /* area_size == -1 marks the cache area as unusable */
	      scan_cache->area_size = -1;
	      return S_ERROR;
	    }
	}
      recdes->data = scan_cache->area;
      recdes->area_size = scan_cache->area_size;

      /* keep growing the area until the big record fits */
      while ((scan = heap_ovf_get (thread_p, forward_oid, recdes, NULL_CHN, NULL)) == S_DOESNT_FIT)
	{
	  /*
	   * The object did not fit into such an area, reallocate a new
	   * area
	   */
	  /* NOTE(review): the statement that enlarges recdes->area_size before
	   * the realloc appears to be missing from this extract (heap_ovf_get
	   * presumably reported the required size) -- confirm against the
	   * repository source. */
	  recdes->data = (char *) db_private_realloc (thread_p, scan_cache->area, recdes->area_size);
	  if (recdes->data == NULL)
	    {
	      return S_ERROR;
	    }
	  scan_cache->area_size = recdes->area_size;
	  scan_cache->area = recdes->data;
	}
      if (scan != S_SUCCESS)
	{
	  /* do not hand the caller a buffer with no valid content */
	  recdes->data = NULL;
	}
    }
  else
    {
      /* caller supplied its own buffer (or no scan cache): fetch directly */
      scan = heap_ovf_get (thread_p, forward_oid, recdes, NULL_CHN, NULL);
    }

  return scan;
}
18776 
18777 /*
18778  * heap_get_class_oid_from_page () - Gets heap page owner class OID.
18779  *
18780  * return : Error code.
18781  * thread_p (in) : Thread entry.
18782  * page_p (in) : Heap page.
18783  * class_oid (out) : Class identifier.
18784  */
int
heap_get_class_oid_from_page (THREAD_ENTRY * thread_p, PAGE_PTR page_p, OID * class_oid)
{
  RECDES chain_recdes;
  HEAP_CHAIN *chain;

  /* the owner class OID is kept in the chain record present on every heap page */
  if (spage_get_record (thread_p, page_p, HEAP_HEADER_AND_CHAIN_SLOTID, &chain_recdes, PEEK) != S_SUCCESS)
    {
      assert (0);
      /* NOTE(review): an er_set call appears to be missing here in this
       * extract -- confirm against the repository source. */
      return ER_FAILED;
    }

  chain = (HEAP_CHAIN *) chain_recdes.data;
  COPY_OID (class_oid, &(chain->class_oid));

  /*
   * kludge, root class is identified with a NULL class OID but we must
   * substitute the actual OID here - think about this
   */
  if (OID_ISNULL (class_oid))
    {
      /* root class class oid, substitute with global */
      COPY_OID (class_oid, oid_Root_class_oid);
    }
  return NO_ERROR;
}
18812 
18813 /*
18814  * heap_mvcc_log_home_change_on_delete () - Log the change of record in home page when MVCC delete does not
18815  * change a REC_HOME to REC_HOME.
18816  *
18817  * return : Void.
18818  * thread_p (in) : Thread entry.
18819  * old_recdes (in) : NULL or a REC_RELOCATION record.
18820  * new_recdes (in) : Record including delete info (MVCCID and next version).
18821  * p_addr (in) : Log data address.
18822  */
static void
heap_mvcc_log_home_change_on_delete (THREAD_ENTRY * thread_p, RECDES * old_recdes, RECDES * new_recdes,
				     LOG_DATA_ADDR * p_addr)
{
  /* snapshot vacuum status before the chain update so a change is detectable */
  HEAP_PAGE_VACUUM_STATUS vacuum_status = heap_page_get_vacuum_status (thread_p, p_addr->pgptr);

  /* REC_RELOCATION type record was brought back to home page or REC_HOME has been converted to
   * REC_RELOCATION/REC_BIGONE. */

  /* Update heap chain for vacuum. */
  heap_page_update_chain_after_mvcc_op (thread_p, p_addr->pgptr, logtb_get_current_mvccid (thread_p));
  if (heap_page_get_vacuum_status (thread_p, p_addr->pgptr) != vacuum_status)
    {
      /* Mark vacuum status change for recovery. */
      /* NOTE(review): the statement recording the status change (presumably
       * setting a flag on p_addr->offset) appears to be missing from this
       * extract -- confirm against the repository source. */
    }

  /* undo/redo: old home record (may be NULL) vs. new record carrying delete info */
  log_append_undoredo_recdes (thread_p, RVHF_MVCC_DELETE_MODIFY_HOME, p_addr, old_recdes, new_recdes);
}
18842 
18843 /*
18844  * heap_mvcc_log_home_no_change () - Update page chain for vacuum and notify vacuum even when home page is not changed.
18845  * Used by update/delete of REC_RELOCATION and REC_BIGONE.
18846  *
18847  * return : Void.
18848  * thread_p (in) : Thread entry.
18849  * p_addr (in) : Data address for logging.
18850  */
static void
heap_mvcc_log_home_no_change (THREAD_ENTRY * thread_p, LOG_DATA_ADDR * p_addr)
{
  /* snapshot vacuum status before the chain update so a change is detectable */
  HEAP_PAGE_VACUUM_STATUS vacuum_status = heap_page_get_vacuum_status (thread_p, p_addr->pgptr);

  /* Update heap chain for vacuum. */
  heap_page_update_chain_after_mvcc_op (thread_p, p_addr->pgptr, logtb_get_current_mvccid (thread_p));
  if (vacuum_status != heap_page_get_vacuum_status (thread_p, p_addr->pgptr))
    {
      /* Mark vacuum status change for recovery. */
      /* NOTE(review): the statement recording the status change (presumably
       * setting a flag on p_addr->offset) appears to be missing from this
       * extract -- confirm against the repository source. */
    }

  /* append a no-modification record so vacuum is still notified on recovery */
  log_append_undoredo_data (thread_p, RVHF_MVCC_NO_MODIFY_HOME, p_addr, 0, 0, NULL, NULL);
}
18866 
18867 /*
18868  * heap_rv_redo_update_and_update_chain () - Redo update record as part of MVCC delete operation.
18869  * return: int
18870  * rcv(in): Recovery structure
18871  */
int
/* NOTE(review): the declarator line for heap_rv_redo_update_and_update_chain
 * (THREAD_ENTRY * thread_p, LOG_RCV * rcv) is absent from this extract --
 * verify against the repository source. */
{
  int error_code = NO_ERROR;
  bool vacuum_status_change = false;
  PGSLOTID slotid;

  assert (rcv->pgptr != NULL);
  assert (MVCCID_IS_NORMAL (rcv->mvcc_id));

  /* the vacuum-status-change indicator is piggybacked as a flag bit on the
   * slot id stored in the recovery record offset */
  slotid = rcv->offset;
  if (slotid & HEAP_RV_FLAG_VACUUM_STATUS_CHANGE)
    {
      vacuum_status_change = true;
    }
  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
  assert (slotid > 0);

  /* redo the record update itself */
  error_code = heap_rv_redo_update (thread_p, rcv);
  if (error_code != NO_ERROR)
    {
      ASSERT_ERROR ();
      return error_code;
    }

  /* redo the page chain update that the MVCC operation performed */
  heap_page_rv_chain_update (thread_p, rcv->pgptr, rcv->mvcc_id, vacuum_status_change);
  /* Page was already marked as dirty */
  return NO_ERROR;
}
18901 
18902 /*
18903  * heap_attrinfo_check_unique_index () - check whether exists an unique index on
18904  * specified attributes
18905  * return: true, if there is an index containing specified attributes
18906  * thread_p(in): thread entry
18907  * attr_info(in): attribute info
18908  * att_id(in): attribute ids
18909  * n_att_id(in): count attributes
18910  */
bool
/* NOTE(review): the first line of the declarator
 * (heap_attrinfo_check_unique_index (THREAD_ENTRY * thread_p, ... attr_info, ... att_id,)
 * is absent from this extract; only its continuation remains below -- verify
 * against the repository source. */
				  int n_att_id)
{
  OR_INDEX *index;
  int num_btids, i, j, k;

  /* nothing to check without attribute info or an attribute list */
  if (attr_info == NULL || att_id == NULL)
    {
      return false;
    }

  /* scan every index of the last class representation */
  num_btids = attr_info->last_classrepr->n_indexes;
  for (i = 0; i < num_btids; i++)
    {
      index = &(attr_info->last_classrepr->indexes[i]);
      if (btree_is_unique_type (index->type))
	{
	  /* does any of the given attributes participate in this unique index? */
	  for (j = 0; j < n_att_id; j++)
	    {
	      for (k = 0; k < index->n_atts; k++)
		{
		  if (att_id[j] == (ATTR_ID) (index->atts[k]->id))
		    {		/* the index key_type has updated attr */
		      return true;
		    }
		}
	    }
	}
    }

  return false;
}
18944 
18945 #if defined(ENABLE_UNUSED_FUNCTION)
18946 /*
18947  * heap_try_fetch_header_page () -
18948  * try to fetch header page, having home page already fetched
18949  *
18950  * return: error code
18951  * thread_p(in): thread entry
18952  * home_pgptr_p(out):
18953  * home_vpid_p(in):
18954  * oid_p(in):
18955  * hdr_pgptr_p(out):
18956  * hdr_vpid_p(in):
18957  * scan_cache(in):
18958  * again_count_p(in/out):
18959  * again_max(in):
18960  */
18961 /* TODO - fix er_clear */
STATIC_INLINE int
heap_try_fetch_header_page (THREAD_ENTRY * thread_p, PAGE_PTR * home_pgptr_p, const VPID * home_vpid_p,
			    const OID * oid_p, PAGE_PTR * hdr_pgptr_p, const VPID * hdr_vpid_p,
			    HEAP_SCANCACHE * scan_cache, int *again_count_p, int again_max)
{
  int error_code = NO_ERROR;

  /* first try a conditional latch so the home page can stay fixed */
  *hdr_pgptr_p = pgbuf_fix (thread_p, hdr_vpid_p, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
  if (*hdr_pgptr_p != NULL)
    {
      return NO_ERROR;
    }

  /* conditional latch failed: release home page, fix the header page
   * unconditionally, then re-fix home with a conditional latch */
  pgbuf_unfix_and_init (thread_p, *home_pgptr_p);
  *hdr_pgptr_p = heap_scan_pb_lock_and_fetch (thread_p, hdr_vpid_p, OLD_PAGE, X_LOCK, scan_cache, NULL);
  if (*hdr_pgptr_p == NULL)
    {
      error_code = er_errid ();
      if (error_code == ER_PB_BAD_PAGEID)
	{
	  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, hdr_vpid_p->volid, hdr_vpid_p->pageid,
		  0);
	  error_code = ER_HEAP_UNKNOWN_OBJECT;
	}
    }
  else
    {
      *home_pgptr_p = pgbuf_fix (thread_p, home_vpid_p, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
      if (*home_pgptr_p == NULL)
	{
	  pgbuf_unfix_and_init (thread_p, *hdr_pgptr_p);
	  /* caller retries; give up only after again_max attempts */
	  if ((*again_count_p)++ >= again_max)
	    {
	      error_code = er_errid ();
	      if (error_code == ER_PB_BAD_PAGEID)
		{
		  /* NOTE(review): the er_set call that precedes this argument
		   * list is missing from this extract -- confirm against the
		   * repository source. */
		  oid_p->slotid);
		  error_code = ER_HEAP_UNKNOWN_OBJECT;
		}
	      else if (error_code == NO_ERROR)
		{
		  /* NOTE(review): the er_set call that precedes this argument
		   * list is missing from this extract -- confirm against the
		   * repository source. */
		  home_vpid_p->pageid);
		  error_code = ER_PAGE_LATCH_ABORTED;
		}
	    }
	}
    }

  return error_code;
}
19014 
19015 /*
19016  * heap_try_fetch_forward_page () -
19017  * try to fetch forward page, having home page already fetched
19018  *
19019  * return: error code
19020  * thread_p(in): thread entry
19021  * home_pgptr_p(out):
19022  * home_vpid_p(in):
19023  * oid_p(in):
19024  * fwd_pgptr_p(out):
19025  * fwd_vpid_p(in):
19026  * fwd_oid_p(in):
19027  * scan_cache(in):
19028  * again_count_p(in/out):
19029  * again_max(in):
19030  */
STATIC_INLINE int
heap_try_fetch_forward_page (THREAD_ENTRY * thread_p, PAGE_PTR * home_pgptr_p, const VPID * home_vpid_p,
			     const OID * oid_p, PAGE_PTR * fwd_pgptr_p, const VPID * fwd_vpid_p, const OID * fwd_oid_p,
			     HEAP_SCANCACHE * scan_cache, int *again_count_p, int again_max)
{
  int error_code = NO_ERROR;

  /* first try a conditional latch so the home page can stay fixed */
  *fwd_pgptr_p = pgbuf_fix (thread_p, fwd_vpid_p, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
  if (*fwd_pgptr_p != NULL)
    {
      return NO_ERROR;
    }

  /* conditional latch failed: release home page, fix the forward page
   * unconditionally, then re-fix home with a conditional latch */
  pgbuf_unfix_and_init (thread_p, *home_pgptr_p);
  *fwd_pgptr_p = heap_scan_pb_lock_and_fetch (thread_p, fwd_vpid_p, OLD_PAGE, X_LOCK, scan_cache, NULL);
  if (*fwd_pgptr_p == NULL)
    {
      error_code = er_errid ();
      if (error_code == ER_PB_BAD_PAGEID)
	{
	  /* NOTE(review): the er_set call that precedes this argument list is
	   * missing from this extract -- confirm against the repository source. */
	  fwd_oid_p->slotid);
	  error_code = ER_HEAP_UNKNOWN_OBJECT;
	}
    }
  else
    {
      *home_pgptr_p = pgbuf_fix (thread_p, home_vpid_p, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
      if (*home_pgptr_p == NULL)
	{
	  pgbuf_unfix_and_init (thread_p, *fwd_pgptr_p);
	  /* caller retries; give up only after again_max attempts */
	  if ((*again_count_p)++ >= again_max)
	    {
	      error_code = er_errid ();
	      if (error_code == ER_PB_BAD_PAGEID)
		{
		  /* NOTE(review): the er_set call that precedes this argument
		   * list is missing from this extract -- confirm against the
		   * repository source. */
		  oid_p->slotid);
		  error_code = ER_HEAP_UNKNOWN_OBJECT;
		}
	      else if (error_code == NO_ERROR)
		{
		  /* NOTE(review): the er_set call that precedes this argument
		   * list is missing from this extract -- confirm against the
		   * repository source. */
		  home_vpid_p->pageid);
		  error_code = ER_PAGE_LATCH_ABORTED;
		}
	    }
	}
    }

  return error_code;
}
19083 
19084 /*
19085  * heap_try_fetch_header_with_forward_page () -
19086  * try to fetch header and forward page, having home page already fetched
19087  *
19088  * return: error code
19089  * thread_p(in): thread entry
19090  * home_pgptr_p(out):
19091  * home_vpid_p(in):
19092  * oid_p(in):
19093  * hdr_pgptr_p(out):
19094  * hdr_vpid_p(in):
19095  * fwd_pgptr_p(out):
19096  * fwd_vpid_p(in):
19097  * fwd_oid_p(in):
19098  * scan_cache(in):
19099  * again_count_p(in/out):
19100  * again_max(in):
19101  */
STATIC_INLINE int
heap_try_fetch_header_with_forward_page (THREAD_ENTRY * thread_p, PAGE_PTR * home_pgptr_p, const VPID * home_vpid_p,
					 const OID * oid_p, PAGE_PTR * hdr_pgptr_p, const VPID * hdr_vpid_p,
					 PAGE_PTR * fwd_pgptr_p, const VPID * fwd_vpid_p, const OID * fwd_oid_p,
					 HEAP_SCANCACHE * scan_cache, int *again_count_p, int again_max)
{
  int error_code = NO_ERROR;

  /* first try a conditional latch so home and forward pages can stay fixed */
  *hdr_pgptr_p = pgbuf_fix (thread_p, hdr_vpid_p, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
  if (*hdr_pgptr_p != NULL)
    {
      return NO_ERROR;
    }

  /* conditional latch failed: release both pages, fix the header page
   * unconditionally, then re-fix home and forward with conditional latches */
  pgbuf_unfix_and_init (thread_p, *home_pgptr_p);
  pgbuf_unfix_and_init (thread_p, *fwd_pgptr_p);
  *hdr_pgptr_p = heap_scan_pb_lock_and_fetch (thread_p, hdr_vpid_p, OLD_PAGE, X_LOCK, scan_cache, NULL);
  if (*hdr_pgptr_p == NULL)
    {
      error_code = er_errid ();
      if (error_code == ER_PB_BAD_PAGEID)
	{
	  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_HEAP_UNKNOWN_OBJECT, 3, hdr_vpid_p->volid, hdr_vpid_p->pageid,
		  0);
	  error_code = ER_HEAP_UNKNOWN_OBJECT;
	}
    }
  else
    {
      *home_pgptr_p = pgbuf_fix (thread_p, home_vpid_p, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
      if (*home_pgptr_p == NULL)
	{
	  pgbuf_unfix_and_init (thread_p, *hdr_pgptr_p);
	  /* caller retries; give up only after again_max attempts */
	  if ((*again_count_p)++ >= again_max)
	    {
	      error_code = er_errid ();
	      if (error_code == ER_PB_BAD_PAGEID)
		{
		  /* NOTE(review): the er_set call that precedes this argument
		   * list is missing from this extract -- confirm against the
		   * repository source. */
		  oid_p->slotid);
		  error_code = ER_HEAP_UNKNOWN_OBJECT;
		}
	      else if (error_code == NO_ERROR)
		{
		  /* NOTE(review): the er_set call that precedes this argument
		   * list is missing from this extract -- confirm against the
		   * repository source. */
		  home_vpid_p->pageid);
		  error_code = ER_PAGE_LATCH_ABORTED;
		}
	    }
	}
      else
	{
	  *fwd_pgptr_p = pgbuf_fix (thread_p, fwd_vpid_p, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_CONDITIONAL_LATCH);
	  if (*fwd_pgptr_p == NULL)
	    {
	      pgbuf_unfix_and_init (thread_p, *hdr_pgptr_p);
	      pgbuf_unfix_and_init (thread_p, *home_pgptr_p);
	      /* caller retries; give up only after again_max attempts */
	      if ((*again_count_p)++ >= again_max)
		{
		  error_code = er_errid ();
		  if (error_code == ER_PB_BAD_PAGEID)
		    {
		      /* NOTE(review): the er_set call that precedes this
		       * argument list is missing from this extract -- confirm
		       * against the repository source. */
		      fwd_oid_p->pageid, fwd_oid_p->slotid);
		      error_code = ER_HEAP_UNKNOWN_OBJECT;
		    }
		  else if (er_errid () == NO_ERROR)
		    {
		      /* NOTE(review): the er_set call that precedes this
		       * argument list is missing from this extract; also note
		       * this branch tests er_errid () and does not assign
		       * error_code, unlike the sibling helpers -- confirm
		       * against the repository source. */
		      fwd_vpid_p->pageid);
		    }
		}
	    }
	}
    }

  return error_code;
}
19180 #endif /* ENABLE_UNUSED_FUNCTION */
19181 
19182 /*
19183  * heap_get_header_page () -
19184  * return: error code
 * hfid(in): Heap file identifier
19186  * header_vpid(out):
19187  *
19188  * Note: get the page identifier of the first allocated page of the given file.
19189  */
19190 int
19191 heap_get_header_page (THREAD_ENTRY * thread_p, const HFID * hfid, VPID * header_vpid)
19192 {
19193  assert (!VFID_ISNULL (&hfid->vfid));
19194 
19195  return file_get_sticky_first_page (thread_p, &hfid->vfid, header_vpid);
19196 }
19197 
19198 /*
19199  * heap_scancache_quick_start_root_hfid () - Start caching information for a
19200  * heap scan on root hfid
19201  * return: NO_ERROR
19202  * thread_p(in):
19203  * scan_cache(in/out): Scan cache
19204  *
19205  * Note: this is similar to heap_scancache_quick_start, except it sets the
19206  * HFID of root in the scan_cache (otherwise remains NULL).
19207  * This should be used to avoid inconsistency when using ordered fix.
19208  */
int
/* NOTE(review): the declarator line for heap_scancache_quick_start_root_hfid
 * (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache) is absent from this
 * extract -- verify against the repository source. */
{
  HFID root_hfid;

  /* resolve root class HFID and start a quick scan cache bound to it */
  (void) boot_find_root_heap (&root_hfid);
  (void) heap_scancache_quick_start_internal (scan_cache, &root_hfid);
  /* quick-start scans use shared page latches */
  scan_cache->page_latch = S_LOCK;

  return NO_ERROR;
}
19220 
19221 
19222 /*
19223  * heap_scancache_quick_start_with_class_oid () - Start caching information for
19224  * a heap scan on a class.
19225  *
19226  * return: NO_ERROR
19227  * thread_p(in):
19228  * scan_cache(in/out): Scan cache
19229  * class_oid(in): class
19230  *
19231  * Note: this is similar to heap_scancache_quick_start, except it sets the
19232  * HFID of class in the scan_cache (otherwise remains NULL).
19233  * This should be used to avoid inconsistency when using ordered fix.
19234  * This has a page latch overhead on top of heap_scancache_quick_start.
19235  *
19236  */
int
/* NOTE(review): the declarator line for
 * heap_scancache_quick_start_with_class_oid (THREAD_ENTRY * thread_p,
 * HEAP_SCANCACHE * scan_cache, OID * class_oid) is absent from this extract --
 * verify against the repository source. */
{
  HFID class_hfid;

  /* resolve the class HFID, then delegate to the HFID-based quick start */
  heap_get_hfid_from_class_oid (thread_p, class_oid, &class_hfid);
  (void) heap_scancache_quick_start_with_class_hfid (thread_p, scan_cache, &class_hfid);
  /* quick-start scans use shared page latches */
  scan_cache->page_latch = S_LOCK;

  return NO_ERROR;
}
19248 
19249 /*
19250  * heap_scancache_quick_start_with_class_hfid () - Start caching information for
19251  * a heap scan on a class.
19252  *
19253  * return: NO_ERROR
19254  * thread_p(in):
19255  * scan_cache(in/out): Scan cache
 * hfid(in): heap file identifier of the class
19257  *
19258  * Note: this is similar to heap_scancache_quick_start, except it sets the
19259  * HFID of class in the scan_cache (otherwise remains NULL).
19260  * This should be used to avoid inconsistency when using ordered fix.
19261  *
19262  */
int
/* NOTE(review): the declarator line for
 * heap_scancache_quick_start_with_class_hfid (THREAD_ENTRY * thread_p,
 * HEAP_SCANCACHE * scan_cache, const HFID * hfid) is absent from this
 * extract -- verify against the repository source. */
{
  /* start a quick scan cache bound to the given HFID */
  (void) heap_scancache_quick_start_internal (scan_cache, hfid);
  /* quick-start scans use shared page latches */
  scan_cache->page_latch = S_LOCK;

  return NO_ERROR;
}
19271 
19272 
19273 /*
19274  * heap_scancache_quick_start_modify_with_class_oid () -
19275  * Start caching information for a heap scan on class.
19276  *
19277  * return: NO_ERROR
19278  * thread_p(in):
19279  * scan_cache(in/out): Scan cache
19280  * class_oid(in): class
19281  *
19282  * Note: this is similar to heap_scancache_quick_start_modify, except it sets
19283  * the HFID of class in the scan_cache (otherwise remains NULL).
19284  * This should be used to avoid inconsistency when using ordered fix.
19285  * This has a page latch overhead on top of heap_scancache_quick_start.
19286  */
int
/* NOTE(review): the declarator line for
 * heap_scancache_quick_start_modify_with_class_oid (THREAD_ENTRY * thread_p,
 * HEAP_SCANCACHE * scan_cache, OID * class_oid) is absent from this extract --
 * verify against the repository source. */
{
  HFID class_hfid;

  /* resolve the class HFID and start a quick scan cache bound to it */
  heap_get_hfid_from_class_oid (thread_p, class_oid, &class_hfid);
  (void) heap_scancache_quick_start_internal (scan_cache, &class_hfid);
  /* modify variant: pages will be latched exclusively */
  scan_cache->page_latch = X_LOCK;

  return NO_ERROR;
}
19298 
19299 /*
19300  * heap_link_watchers () - link page watchers of a child operation to it's
19301  * parent
19302  * child(in): child operation context
19303  * parent(in): parent operation context
19304  *
19305  * NOTE: Sometimes, parts of a heap operation are executed in a parent heap
19306  * operation, skipping the fixing of pages and location of records.
19307  * Since page watchers are identified by address, we must use a single
19308  * location for them, and reference it everywhere.
19309  */
19310 static void
19311 heap_link_watchers (HEAP_OPERATION_CONTEXT * child, HEAP_OPERATION_CONTEXT * parent)
19312 {
19313  assert (child != NULL);
19314  assert (parent != NULL);
19315 
19316  child->header_page_watcher_p = &parent->header_page_watcher;
19317  child->forward_page_watcher_p = &parent->forward_page_watcher;
19319  child->home_page_watcher_p = &parent->home_page_watcher;
19320 }
19321 
19322 /*
19323  * heap_unfix_watchers () - unfix context pages
19324  * thread_p(in): thread entry
19325  * context(in): operation context
19326  *
19327  * NOTE: This function only unfixes physical watchers. Calling this in a child
19328  * operation that was linked to the parent with heap_link_watchers will
19329  * have no effect on the fixed pages.
19330  */
static void
heap_unfix_watchers (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
{
  assert (context != NULL);

  /* unfix pages; only watchers with a physically fixed page are touched, so a
   * child context linked to its parent via heap_link_watchers () is not
   * affected (its own physical watchers were never fixed) */
  if (context->home_page_watcher.pgptr != NULL)
    {
      pgbuf_ordered_unfix (thread_p, &context->home_page_watcher);
    }
  if (context->overflow_page_watcher.pgptr != NULL)
    {
      pgbuf_ordered_unfix (thread_p, &context->overflow_page_watcher);
    }
  if (context->header_page_watcher.pgptr != NULL)
    {
      pgbuf_ordered_unfix (thread_p, &context->header_page_watcher);
    }
  if (context->forward_page_watcher.pgptr != NULL)
    {
      pgbuf_ordered_unfix (thread_p, &context->forward_page_watcher);
    }
}
19354 
19355 /*
19356  * heap_clear_operation_context () - clear a heap operation context
19357  * context(in): the context
19358  * hfid_p(in): heap file identifier
19359  */
static void
heap_clear_operation_context (HEAP_OPERATION_CONTEXT * context, HFID * hfid_p)
{
  assert (context != NULL);
  assert (hfid_p != NULL);

  /* keep hfid */
  HFID_COPY (&context->hfid, hfid_p);

  /* initialize watchers to HFID */
  /* NOTE(review): the watcher initialization statements (one per page
   * watcher) appear to be missing from this extract -- confirm against the
   * repository source. */

  /* by default link physical watchers to usage watchers on same context */
  heap_link_watchers (context, context);

  /* nullify everything else */
  context->type = HEAP_OPERATION_NONE;
  /* NOTE(review): a statement (possibly resetting update_in_place) appears to
   * be missing here in this extract -- confirm against the repository source. */
  OID_SET_NULL (&context->oid);
  OID_SET_NULL (&context->class_oid);
  context->recdes_p = NULL;
  context->scan_cache_p = NULL;

  /* map record (forwarding record) starts out empty */
  context->map_recdes.data = NULL;
  context->map_recdes.length = 0;
  context->map_recdes.area_size = 0;
  context->map_recdes.type = REC_UNKNOWN;

  OID_SET_NULL (&context->ovf_oid);

  /* home record starts out empty */
  context->home_recdes.data = NULL;
  context->home_recdes.length = 0;
  context->home_recdes.area_size = 0;
  context->home_recdes.type = REC_UNKNOWN;

  context->record_type = REC_UNKNOWN;
  context->file_type = FILE_UNKNOWN_TYPE;
  OID_SET_NULL (&context->res_oid);
  context->is_logical_old = false;
  context->is_redistribute_insert_with_delid = false;

  context->time_track = NULL;
}
19406 
19407 /*
19408  * heap_mark_class_as_modified () - add to transaction's modified class list
19409  * and cache/decache coherency number
19410  * thread_p(in): thread entry
19411  * oid_p(in): class OID
19412  * chn(in): coherency number (required iff decache == false)
19413  * decache(in): (false => cache, true => decache)
19414  */
19415 static int
19416 heap_mark_class_as_modified (THREAD_ENTRY * thread_p, OID * oid_p, int chn, bool decache)
19417 {
19418  char *classname = NULL;
19419 
19420  assert (oid_p != NULL);
19421 
19422  if (heap_Guesschn == NULL || HFID_IS_NULL (&(heap_Classrepr->rootclass_hfid)))
19423  {
19424  /* nothing to do */
19425  return NO_ERROR;
19426  }
19427 
19428  if (heap_get_class_name (thread_p, oid_p, &classname) != NO_ERROR || classname == NULL)
19429  {
19430  ASSERT_ERROR ();
19431  return ER_FAILED;
19432  }
19433  if (log_add_to_modified_class_list (thread_p, classname, oid_p) != NO_ERROR)
19434  {
19435  free_and_init (classname);
19436  return ER_FAILED;
19437  }
19438 
19439  free_and_init (classname);
19440 
19441  if (csect_enter (thread_p, CSECT_HEAP_CHNGUESS, INF_WAIT) != NO_ERROR)
19442  {
19443  return ER_FAILED;
19444  }
19445  heap_Guesschn->schema_change = true;
19446 
19447  if (decache)
19448  {
19449  (void) heap_chnguess_decache (oid_p);
19450  }
19451  else
19452  {
19453  (void) heap_chnguess_put (thread_p, oid_p, LOG_FIND_THREAD_TRAN_INDEX (thread_p), chn);
19454  }
19455 
19456  csect_exit (thread_p, CSECT_HEAP_CHNGUESS);
19457 
19458  /* all ok */
19459  return NO_ERROR;
19460 }
19461 
19462 /*
19463  * heap_get_file_type () - get the file type from a heap operation context
19464  * thread_p(in): thread entry
19465  * context(in): operation context
19466  * returns: file type
19467  */
static FILE_TYPE
heap_get_file_type (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
{
  FILE_TYPE file_type;
  if (context->scan_cache_p != NULL)
    {
      /* the scan cache already carries the file type of this heap */
      assert (HFID_EQ (&context->hfid, &context->scan_cache_p->node.hfid));
      /* NOTE(review): an additional statement (likely another assert) appears
       * to be missing here in this extract -- confirm against the repository
       * source. */

      return context->scan_cache_p->file_type;
    }
  else
    {
      /* no scan cache: resolve the file type from the class OID */
      if (heap_get_hfid_and_file_type_from_class_oid (thread_p, &context->class_oid, NULL, &file_type) != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  return FILE_UNKNOWN_TYPE;
	}
      assert (file_type == FILE_HEAP || file_type == FILE_HEAP_REUSE_SLOTS);
      return file_type;
    }
}
19490 
19491 /*
19492  * heap_is_valid_oid () - check if provided OID is valid
19493  * oid_p(in): object identifier
19494  * returns: error code or NO_ERROR
19495  */
static int
heap_is_valid_oid (THREAD_ENTRY * thread_p, OID * oid_p)
{
  DISK_ISVALID oid_valid = HEAP_ISVALID_OID (thread_p, oid_p);

  if (oid_valid != DISK_VALID)
    {
      if (oid_valid != DISK_ERROR)
	{
	  /* DISK_INVALID here is a caller bug rather than a lower-layer error */
	  assert (false);
	  /* NOTE(review): the er_set call that precedes this argument list is
	   * missing from this extract -- confirm against the repository
	   * source. */
	  oid_p->slotid);
	}
      return ER_FAILED;
    }
  else
    {
      return NO_ERROR;
    }
}
19516 
19517 /*
19518  * heap_fix_header_page () - fix header page for a heap operation context
19519  * thread_p(in): thread entry
19520  * context(in): operation context
19521  * returns: error code or NO_ERROR
19522  */
19523 static int
19524 heap_fix_header_page (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
19525 {
19526  VPID header_vpid;
19527  int rc;
19528 
19529  assert (context != NULL);
19530  assert (context->header_page_watcher_p != NULL);
19531 
19532  if (context->header_page_watcher_p->pgptr != NULL)
19533  {
19534  /* already fixed */
19535  return NO_ERROR;
19536  }
19537 
19538  /* fix header page */
19539  header_vpid.volid = context->hfid.vfid.volid;
19540  header_vpid.pageid = context->hfid.hpgid;
19541 
19542  /* fix page */
19543  rc = pgbuf_ordered_fix (thread_p, &header_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, context->header_page_watcher_p);
19544  if (rc != NO_ERROR)
19545  {
19546  if (rc == ER_LK_PAGE_TIMEOUT && er_errid () == NO_ERROR)
19547  {
19548  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_PAGE_LATCH_ABORTED, 2, header_vpid.volid, header_vpid.pageid);
19549  rc = ER_PAGE_LATCH_ABORTED;
19550  }
19551  return rc;
19552  }
19553 
19554  /* check page type */
19555  (void) pgbuf_check_page_ptype (thread_p, context->header_page_watcher_p->pgptr, PAGE_HEAP);
19556 
19557  /* all ok */
19558  return NO_ERROR;
19559 }
19560 
19561 /*
19562  * heap_fix_forward_page () - fix forward page for a heap operation context
19563  * thread_p(in): thread entry
19564  * context(in): operation context
19565  * forward_oid_hint(in): location of forward object (if known)
19566  * returns: error code or NO_ERROR
19567  *
19568  * NOTE: If forward_oid_hint is provided, this function will fix it's page. If
19569  * not, the function will treat the context's home_recdes as a forwarding
19570  * record descriptor and read the identifier from it.
19571  */
static int
heap_fix_forward_page (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, OID * forward_oid_hint)
{
  VPID forward_vpid;
  OID forward_oid;
  int rc;

  assert (context != NULL);
  assert (context->forward_page_watcher_p != NULL);

  if (context->forward_page_watcher_p->pgptr != NULL)
    {
      /* already fixed */
      return NO_ERROR;
    }

  if (forward_oid_hint == NULL)
    {
      assert (context->home_recdes.data != NULL);

      /* cast home record as forward oid if no hint is provided */
      forward_oid = *((OID *) context->home_recdes.data);
    }
  else
    {
      /* oid is provided, use it */
      COPY_OID (&forward_oid, forward_oid_hint);
    }

  /* prepare VPID */
  forward_vpid.pageid = forward_oid.pageid;
  forward_vpid.volid = forward_oid.volid;

  /* fix forward page */
  /* NOTE(review): a statement (possibly adjusting the watcher before the fix)
   * appears to be missing here in this extract -- confirm against the
   * repository source. */
  rc = pgbuf_ordered_fix (thread_p, &forward_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, context->forward_page_watcher_p);
  if (rc != NO_ERROR)
    {
      if (rc == ER_LK_PAGE_TIMEOUT && er_errid () == NO_ERROR)
	{
	  er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_PAGE_LATCH_ABORTED, 2, forward_vpid.volid, forward_vpid.pageid);
	}
      /* returns generic ER_FAILED rather than rc; callers only test != NO_ERROR */
      return ER_FAILED;
    }
  /* sanity: the fixed page must be a heap page */
  (void) pgbuf_check_page_ptype (thread_p, context->forward_page_watcher_p->pgptr, PAGE_HEAP);

#if defined(CUBRID_DEBUG)
  /* debug-only check: the forward slot must hold a REC_NEWHOME record */
  if (spage_get_record_type (context->forward_page_watcher_p->pgptr, forward_oid.slotid) != REC_NEWHOME)
    {
      /* NOTE(review): the er_set call that precedes this argument list is
       * missing from this extract -- confirm against the repository source. */
      forward_oid.slotid);
      return ER_FAILED;
    }
#endif

  /* all ok */
  return NO_ERROR;
}
19630 
19631 /*
19632  * heap_build_forwarding_recdes () - build a record descriptor for pointing to
19633  * a forward object
19634  * recdes_p(in): record descriptor to build into
19635  * rec_type(in): type of record
19636  * forward_oid(in): the oid where the forwarding record will point
19637  */
19638 static void
19639 heap_build_forwarding_recdes (RECDES * recdes_p, INT16 rec_type, OID * forward_oid)
19640 {
19641  assert (recdes_p != NULL);
19642  assert (forward_oid != NULL);
19643 
19644  recdes_p->type = rec_type;
19645  recdes_p->data = (char *) forward_oid;
19646 
19647  recdes_p->length = sizeof (OID);
19648  recdes_p->area_size = sizeof (OID);
19649 }
19650 
19651 /*
19652  * heap_insert_adjust_recdes_header () - adjust record header for insert
19653  * operation
19654  * thread_p(in): thread entry
19655  * insert_context(in/out): insert context
19656  * is_mvcc_class(in): true, if MVCC class
19657  * returns: error code or NO_ERROR
19658  *
19659  * NOTE: For MVCC class, it will add an insert_id to the header. For non-MVCC class, it will clear all flags.
19660  * The function will alter the provided record descriptor data area.
19661  */
static int
heap_insert_adjust_recdes_header (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * insert_context, bool is_mvcc_class)
{
  /* NOTE(review): the declaration of mvcc_rec_header (MVCC_REC_HEADER), used in the
   * slow path below, appears to have been lost in extraction -- verify against the
   * original heap_file.c. */
  int record_size;		/* running record length, adjusted as the header grows/shrinks */
  int repid_and_flag_bits = 0, mvcc_flags = 0;
  char *new_ins_mvccid_pos_p, *start_p, *existing_data_p;
  MVCCID mvcc_id;
  bool use_optimization = false;

  assert (insert_context != NULL);
  assert (insert_context->type == HEAP_OPERATION_INSERT);
  assert (insert_context->recdes_p != NULL);

  record_size = insert_context->recdes_p->length;

  /* Decode representation id and MVCC flag bits from the first word of the header. */
  repid_and_flag_bits = OR_GET_MVCC_REPID_AND_FLAG (insert_context->recdes_p->data);
  mvcc_flags = (repid_and_flag_bits >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK;

#if defined (SERVER_MODE)
  /* In case of partitions, it is possible to have OR_MVCC_FLAG_VALID_PREV_VERSION flag. */
  /* Fast path: MVCC class, plain insert (no in-place update), no prev-version LSA,
   * and the record stays below the multipage threshold even after adding an MVCCID. */
  use_optimization = (is_mvcc_class && (insert_context->update_in_place == UPDATE_INPLACE_NONE)
		      && (!(mvcc_flags & OR_MVCC_FLAG_VALID_PREV_VERSION))
		      && !heap_is_big_length (record_size + OR_MVCCID_SIZE));
#endif

  if (use_optimization)
    {
      /*
       * Most common case. Since is UPDATE_INPLACE_NONE, the header does not have DELID.
       * Optimize header adjustment: patch the raw buffer in place instead of
       * unpacking/repacking the whole MVCC header.
       */
      assert (!(mvcc_flags & OR_MVCC_FLAG_VALID_DELID));
      mvcc_id = logtb_get_current_mvccid (thread_p);

      start_p = insert_context->recdes_p->data;
      /* Skip bytes up to insid_offset */
      new_ins_mvccid_pos_p = start_p + OR_MVCC_INSERT_ID_OFFSET;

      if (!(mvcc_flags & OR_MVCC_FLAG_VALID_INSID))
	{
	  /* Sets MVCC INSID flag, overwrite first four bytes. */
	  repid_and_flag_bits |= (OR_MVCC_FLAG_VALID_INSID << OR_MVCC_FLAG_SHIFT_BITS);
	  OR_PUT_INT (start_p, repid_and_flag_bits);

	  /* Move the record data before inserting INSID (regions overlap, hence memmove). */
	  assert (insert_context->recdes_p->area_size >= insert_context->recdes_p->length + OR_MVCCID_SIZE);
	  existing_data_p = new_ins_mvccid_pos_p;
	  memmove (new_ins_mvccid_pos_p + OR_MVCCID_SIZE, existing_data_p,
		   insert_context->recdes_p->length - OR_MVCC_INSERT_ID_OFFSET);
	  insert_context->recdes_p->length += OR_MVCCID_SIZE;
	}

      /* Sets the MVCC INSID */
      OR_PUT_BIGINT (new_ins_mvccid_pos_p, &mvcc_id);

      return NO_ERROR;
    }

  /* Slow path: read MVCC header from record */
  if (or_mvcc_get_header (insert_context->recdes_p, &mvcc_rec_header) != NO_ERROR)
    {
      return ER_FAILED;
    }

  if (insert_context->update_in_place != UPDATE_INPLACE_OLD_MVCCID)
    {
#if defined (SERVER_MODE)
      if (is_mvcc_class)
	{
	  /* get MVCC id */
	  mvcc_id = logtb_get_current_mvccid (thread_p);

	  /* set MVCC INSID if necessary */
	  /* NOTE(review): the condition guarding this block (presumably "INSID flag not
	   * yet set", plus the statement setting that flag) appears lost in extraction
	   * -- verify against the original file. */
	    {
	      record_size += OR_MVCCID_SIZE;
	    }
	  MVCC_SET_INSID (&mvcc_rec_header, mvcc_id);
	}
      else
#endif /* SERVER_MODE */
	{
	  int curr_header_size, new_header_size;

	  /* strip MVCC information */
	  /* NOTE(review): the statements computing curr_header_size / new_header_size and
	   * clearing the MVCC header flags appear lost in extraction -- as written,
	   * both variables are used uninitialized below; verify against the original. */

	  /* compute new record size */
	  record_size -= (curr_header_size - new_header_size);
	}
    }
  /* NOTE(review): the 'else if' condition guarding the block below (a valid DELID on a
   * partition-redistribute insert) appears lost in extraction -- verify. */
    {
      insert_context->is_redistribute_insert_with_delid = true;
    }


  if (is_mvcc_class && heap_is_big_length (record_size))
    {
      /* for multipage records, set MVCC header size to maximum size */
      /* NOTE(review): the statement performing this (likely a set-maximum-header-size
       * macro on mvcc_rec_header) appears lost in extraction -- verify. */
    }

  /* write the header back to the record */
  if (or_mvcc_set_header (insert_context->recdes_p, &mvcc_rec_header) != NO_ERROR)
    {
      return ER_FAILED;
    }

  /* all ok */
  return NO_ERROR;
}
19779 
19780 /*
19781  * heap_update_adjust_recdes_header () - adjust record header for update
19782  * operation
19783  * thread_p(in): thread entry
19784  * update_context(in/out): update context
19785  * is_mvcc_class(in): specifies whether is MVCC class
19786  * returns: error code or NO_ERROR
19787  *
19788  * NOTE: For MVCC operation, it will add an insert_id and prev version to the header. The prev_version_lsa will be
19789  * filled at the end of the update, in heap_update_set_prev_version().
19790  * For non-MVCC operations, it will clear all flags.
19791  * The function will alter the provided record descriptor data area.
19792  */
static int
heap_update_adjust_recdes_header (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * update_context, bool is_mvcc_class)
{
  /* NOTE(review): the declaration of mvcc_rec_header (MVCC_REC_HEADER), used in the
   * slow path below, appears to have been lost in extraction -- verify against the
   * original heap_file.c. */
  int record_size;		/* running record length, adjusted as the header grows/shrinks */
  int repid_and_flag_bits = 0, mvcc_flags = 0, update_mvcc_flags;
  char *start_p, *new_ins_mvccid_pos_p, *existing_data_p, *new_data_p;
  MVCCID mvcc_id;
  bool use_optimization = false;
  LOG_LSA null_lsa = LSA_INITIALIZER;	/* placeholder; real prev-version LSA is set later */
  bool is_mvcc_op = false;

  assert (update_context != NULL);
  assert (update_context->type == HEAP_OPERATION_UPDATE);
  assert (update_context->recdes_p != NULL);

  record_size = update_context->recdes_p->length;

  /* Decode representation id and MVCC flag bits from the first word of the header. */
  repid_and_flag_bits = OR_GET_MVCC_REPID_AND_FLAG (update_context->recdes_p->data);
  mvcc_flags = (repid_and_flag_bits >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK;
  /* NOTE(review): the initialization of update_mvcc_flags (presumably the INSID and
   * PREV_VERSION flag bits an MVCC update must have) appears lost in extraction --
   * as written, it is used uninitialized below; verify against the original. */

  is_mvcc_op = HEAP_UPDATE_IS_MVCC_OP (is_mvcc_class, update_context->update_in_place);
#if defined (SERVER_MODE)
  /* Fast path applies only if the record stays below the multipage threshold even
   * after reserving room for both an MVCCID and a prev-version LSA. */
  use_optimization = (is_mvcc_op && !heap_is_big_length (record_size + OR_MVCCID_SIZE + OR_MVCC_PREV_VERSION_LSA_SIZE));
#endif

  if (use_optimization)
    {
      /*
       * Most common case. Since is UPDATE_INPLACE_NONE, the header does not have DELID.
       * Optimize header adjustment: patch the raw buffer in place.
       */
      assert (!(mvcc_flags & OR_MVCC_FLAG_VALID_DELID));
      mvcc_id = logtb_get_current_mvccid (thread_p);
      start_p = update_context->recdes_p->data;

      /* Skip bytes up to insid_offset */
      new_ins_mvccid_pos_p = start_p + OR_MVCC_INSERT_ID_OFFSET;

      /* Check whether we need to set flags and to reserve space. */
      if ((mvcc_flags & update_mvcc_flags) != update_mvcc_flags)
	{
	  /* Need to set flags and reserve space for MVCCID and/or PREV LSA */
	  existing_data_p = new_ins_mvccid_pos_p;

	  /* Computes added bytes and new flags */
	  if (mvcc_flags & OR_MVCC_FLAG_VALID_INSID)
	    {
	      existing_data_p += OR_MVCCID_SIZE;
	    }

	  if (mvcc_flags & OR_MVCC_FLAG_VALID_PREV_VERSION)
	    {
	      existing_data_p += OR_MVCC_PREV_VERSION_LSA_SIZE;
	    }

	  /* Sets the new flags, overwrite first four bytes. */
	  repid_and_flag_bits |= (update_mvcc_flags << OR_MVCC_FLAG_SHIFT_BITS);
	  OR_PUT_INT (start_p, repid_and_flag_bits);

	  /* Move the record data before inserting INSID and LOG_LSA (overlap => memmove) */
	  new_data_p = new_ins_mvccid_pos_p + OR_MVCCID_SIZE + OR_MVCC_PREV_VERSION_LSA_SIZE;
	  assert (existing_data_p < new_data_p);
	  assert (update_context->recdes_p->area_size >= update_context->recdes_p->length
		  + CAST_BUFLEN (new_data_p - existing_data_p));
	  memmove (new_data_p, existing_data_p,
		   update_context->recdes_p->length - CAST_BUFLEN (existing_data_p - start_p));
	  update_context->recdes_p->length += (CAST_BUFLEN (new_data_p - existing_data_p));
	}

      /* Sets the MVCC INSID */
      OR_PUT_BIGINT (new_ins_mvccid_pos_p, &mvcc_id);

      /*
       * Adds NULL LSA after INSID. The prev_version_lsa will be filled at the end of the update,
       * in heap_update_set_prev_version().
       */
      memcpy (new_ins_mvccid_pos_p + OR_MVCCID_SIZE, &null_lsa, OR_MVCC_PREV_VERSION_LSA_SIZE);
      return NO_ERROR;
    }


  /* Slow path: read MVCC header from record */
  if (or_mvcc_get_header (update_context->recdes_p, &mvcc_rec_header) != NO_ERROR)
    {
      return ER_FAILED;
    }

  if (update_context->update_in_place != UPDATE_INPLACE_OLD_MVCCID)
    {
#if defined (SERVER_MODE)
      if (is_mvcc_class)
	{
	  /* get MVCC id */
	  MVCCID mvcc_id = logtb_get_current_mvccid (thread_p);

	  /* set MVCC INSID if necessary */
	  /* NOTE(review): the condition guarding this block (presumably "INSID flag not
	   * yet set", plus the statement setting that flag) appears lost in extraction
	   * -- verify against the original file. */
	    {
	      record_size += OR_MVCCID_SIZE;
	    }
	  MVCC_SET_INSID (&mvcc_rec_header, mvcc_id);
	}
      else
#endif /* SERVER_MODE */
	{
	  int curr_header_size, new_header_size;

	  /* strip MVCC information */
	  /* NOTE(review): the statements computing curr_header_size / new_header_size and
	   * clearing the MVCC header flags appear lost in extraction -- as written,
	   * both variables are used uninitialized below; verify against the original. */

	  /* compute new record size */
	  record_size -= (curr_header_size - new_header_size);
	}
    }

#if defined (SERVER_MODE)
  if (is_mvcc_op)
    {
      /* NOTE(review): the condition guarding the block below (presumably "PREV_VERSION
       * flag not yet set") appears lost in extraction -- verify. */
	{
	  record_size += OR_MVCC_PREV_VERSION_LSA_SIZE;
	}

      /* The prev_version_lsa will be filled at the end of the update, in heap_update_set_prev_version() */
      /* NOTE(review): a statement (likely setting the prev-version LSA in the header to
       * a null LSA) appears lost in extraction -- verify. */
    }
  else
#endif /* SERVER_MODE */
    {
      /* NOTE(review): a statement (likely clearing the prev-version flag/LSA for the
       * non-MVCC case) appears lost in extraction -- verify. */
    }

  if (is_mvcc_class && heap_is_big_length (record_size))
    {
      /* for multipage records, set MVCC header size to maximum size */
      /* NOTE(review): the statement performing this appears lost in extraction -- verify. */
    }

  /* write the header back to the record */
  if (or_mvcc_set_header (update_context->recdes_p, &mvcc_rec_header) != NO_ERROR)
    {
      return ER_FAILED;
    }

  /* all ok */
  return NO_ERROR;
}
19946 
19947 /*
19948  * heap_insert_handle_multipage_record () - handle a multipage object for insert
19949  * thread_p(in): thread entry
19950  * context(in): operation context
19951  *
19952  * NOTE: In case of multipage records, this function will perform the overflow
19953  * insertion and provide a forwarding record descriptor in map_recdes.
19954  * recdes_p will point to the map_recdes structure for insertion in home
19955  * page.
19956  */
19957 static int
19958 heap_insert_handle_multipage_record (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
19959 {
19960  assert (context != NULL);
19961  assert (context->type == HEAP_OPERATION_INSERT || context->type == HEAP_OPERATION_UPDATE);
19962  assert (context->recdes_p != NULL);
19963 
19964  /* check for big record */
19965  if (!heap_is_big_length (context->recdes_p->length))
19966  {
19967  return NO_ERROR;
19968  }
19969 
19970  /* insert overflow record */
19971  if (heap_ovf_insert (thread_p, &context->hfid, &context->ovf_oid, context->recdes_p) == NULL)
19972  {
19973  return ER_FAILED;
19974  }
19975 
19976  /* Add a map record to point to the record in overflow */
19977  /* NOTE: MVCC information is held in overflow record */
19978  heap_build_forwarding_recdes (&context->map_recdes, REC_BIGONE, &context->ovf_oid);
19979 
19980  /* use map_recdes for page insertion */
19981  context->recdes_p = &context->map_recdes;
19982 
19983  /* all ok */
19984  return NO_ERROR;
19985 }
19986 
19987 /*
19988  * heap_get_insert_location_with_lock () - get a page (and possibly and slot)
19989  * for insert and lock the OID
19990  * thread_p(in): thread entry
19991  * context(in): operation context
19992  * home_hint_p(in): if not null, will try to find and lock a slot in hinted page
19993  * returns: error code or NO_ERROR
19994  *
19995  * NOTE: For all operations, this function will find a suitable page, put it
19996  * in context->home_page_watcher, find a suitable slot, lock it and
19997  * put the exact insert location in context->res_oid.
19998  * NOTE: If a home hint is present, the function will search for a free and
19999  * lockable slot ONLY in the hinted page. If no hint is present, it will
20000  * find the page on it's own.
20001  */
static int
heap_get_insert_location_with_lock (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context,
				    PGBUF_WATCHER * home_hint_p)
{
  int slot_count, slot_id, lk_result;
  LOCK lock;
  int error_code = NO_ERROR;

  /* check input */
  assert (context != NULL);
  assert (context->type == HEAP_OPERATION_INSERT);
  assert (context->recdes_p != NULL);

  if (home_hint_p == NULL)
    {
      /* find and fix page for insert */
      if (heap_stats_find_best_page (thread_p, &context->hfid, context->recdes_p->length,
				     (context->recdes_p->type != REC_NEWHOME), context->recdes_p->length,
				     context->scan_cache_p, context->home_page_watcher_p) == NULL)
	{
	  ASSERT_ERROR_AND_SET (error_code);
	  return error_code;
	}
    }
  else
    {
      assert (home_hint_p->pgptr != NULL);

      /* check page for space and use hinted page as insert page; with a hint we
       * never search elsewhere -- a full hinted page is reported to the caller. */
      if (spage_max_space_for_new_record (thread_p, home_hint_p->pgptr) < context->recdes_p->length)
	{
	  return ER_SP_NOSPACE_IN_PAGE;
	}
      context->home_page_watcher_p = home_hint_p;
    }
  assert (context->home_page_watcher_p->pgptr != NULL);

  /* partially populate output OID */
  /* NOTE(review): the assignment of context->res_oid.volid (from the fixed page's
   * volume id) appears lost in extraction -- verify against the original file. */
  context->res_oid.pageid = pgbuf_get_page_id (context->home_page_watcher_p->pgptr);

  /*
   * Find a slot that is lockable and lock it
   */
  /* determine lock type */
  if (OID_IS_ROOTOID (&context->class_oid))
    {
      /* class creation */
      lock = SCH_M_LOCK;
    }
  else
    {
      /* instance */
      lock = X_LOCK;
    }

  /* retrieve number of slots in page */
  slot_count = spage_number_of_slots (context->home_page_watcher_p->pgptr);

  /* find REC_DELETED_WILL_REUSE slot or add new slot */
  /* slot_id == slot_count means add new slot */
  for (slot_id = 0; slot_id <= slot_count; slot_id++)
    {
      slot_id = spage_find_free_slot (context->home_page_watcher_p->pgptr, NULL, slot_id);
      if (slot_id == SP_ERROR)
	{
	  break;		/* this will not happen */
	}

      context->res_oid.slotid = slot_id;

      /* lock the object to be inserted conditionally; on timeout, simply try the
       * next free slot rather than blocking */
      lk_result = lock_object (thread_p, &context->res_oid, &context->class_oid, lock, LK_COND_LOCK);
      if (lk_result == LK_GRANTED)
	{
	  /* successfully locked! */
	  return NO_ERROR;
	}
      else if (lk_result != LK_NOTGRANTED_DUE_TIMEOUT)
	{
#if !defined(NDEBUG)
	  if (lk_result == LK_NOTGRANTED_DUE_ABORTED)
	    {
	      LOG_TDES *tdes = LOG_FIND_CURRENT_TDES (thread_p);
	      /* NOTE(review): an assertion on tdes (likely that the transaction is being
	       * aborted) appears lost in extraction -- verify; tdes is otherwise unused. */
	    }
	  else
	    {
	      assert (false);	/* unknown locking error */
	    }
#endif
	  break;		/* go to error case */
	}
    }

  /* either lock error or no slot was found in page (which should not happen) */
  OID_SET_NULL (&context->res_oid);
  if (context->home_page_watcher_p != home_hint_p)
    {
      /* we fixed the page ourselves; release it */
      pgbuf_ordered_unfix (thread_p, context->home_page_watcher_p);
    }
  else
    {
      /* hinted page belongs to the caller; just drop our reference */
      context->home_page_watcher_p = NULL;
    }
  assert (false);
  return ER_FAILED;
}
20110 
20111 
20112 /*
20113  * heap_find_location_and_insert_rec_newhome () - find location in a heap page
20114  * and then insert context->record
20115  * thread_p(in): thread entry
20116  * context(in): operation context
20117  * returns: error code or NO_ERROR
20118  *
20119  * NOTE: This function will find a suitable page, put it in
20120  * context->home_page_watcher, insert context->recdes_p into that page
20121  * and put recdes location into context->res_oid.
20122  * Currently, this function is called only for REC_NEWHOME records, when
20123  * lock acquisition is not required.
20124  * The caller must log the inserted data.
20125  */
static int
heap_find_location_and_insert_rec_newhome (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
{
  int sp_success;
  int error_code = NO_ERROR;

  /* check input */
  assert (context != NULL);
  assert (context->type == HEAP_OPERATION_INSERT);
  assert (context->recdes_p != NULL);
  assert (context->recdes_p->type == REC_NEWHOME);

#if defined(CUBRID_DEBUG)
  if (heap_is_big_length (context->recdes_p->length))
    {
      /* NOTE(review): the leading er_log_debug (ARG_FILE_LINE, ...) call of this
       * diagnostic appears lost in extraction -- only its argument continuation
       * lines remain below; verify against the original file. */
		    "heap_insert_internal: This function does not accept"
		    " objects longer than %d. An object of %d was given\n", heap_Maxslotted_reclength, recdes->length);
      return ER_FAILED;
    }
#endif

  /* find a heap page with enough room; no lock acquisition needed for REC_NEWHOME */
  if (heap_stats_find_best_page (thread_p, &context->hfid, context->recdes_p->length, false, context->recdes_p->length,
				 context->scan_cache_p, context->home_page_watcher_p) == NULL)
    {
      ASSERT_ERROR_AND_SET (error_code);
      return error_code;
    }

#if !defined(NDEBUG)
  if (context->scan_cache_p != NULL)
    {
      OID heap_class_oid;

      /* sanity: the chosen page must belong to the class the scan cache is bound to */
      assert (heap_get_class_oid_from_page (thread_p, context->home_page_watcher_p->pgptr, &heap_class_oid) ==
	      NO_ERROR);

      assert (OID_EQ (&heap_class_oid, &context->scan_cache_p->node.class_oid));
    }
#endif

  assert (context->home_page_watcher_p->pgptr != NULL);
  (void) pgbuf_check_page_ptype (thread_p, context->home_page_watcher_p->pgptr, PAGE_HEAP);

  /* physical insertion; slot id is returned through res_oid.slotid */
  sp_success =
    spage_insert (thread_p, context->home_page_watcher_p->pgptr, context->recdes_p, &context->res_oid.slotid);
  if (sp_success == SP_SUCCESS)
    {
      /* NOTE(review): the assignment of context->res_oid.volid (from the fixed page's
       * volume id) appears lost in extraction -- verify against the original file. */
      context->res_oid.pageid = pgbuf_get_page_id (context->home_page_watcher_p->pgptr);

      return NO_ERROR;
    }
  else
    {
      assert (false);
      if (sp_success != SP_ERROR)
	{
	  /* NOTE(review): an error-reporting statement (likely er_set of a generic
	   * error) appears lost in extraction -- verify. */
	}
      OID_SET_NULL (&context->res_oid);
      pgbuf_ordered_unfix (thread_p, context->home_page_watcher_p);
      return ER_FAILED;
    }
}
20191 
20192 /*
20193  * heap_insert_newhome () - will find an insert location for a REC_NEWHOME
20194  * record and will insert it there
20195  * thread_p(in): thread entry
20196  * parent_context(in): the context of the parent operation
20197  * recdes_p(in): record descriptor of newhome record
20198  * out_oid_p(in): pointer to an OID object to be populated with the result
20199  * OID of the insert
20200  * newhome_pg_watcher(out): if not null, should keep the page watcher of newhome
20201  - necessary to set prev version afterwards
20202  * returns: error code or NO_ERROR
20203  *
20204  * NOTE: This function works ONLY in an MVCC operation. It will create a new
20205  * context for the insert operation.
20206  */
20207 static int
20208 heap_insert_newhome (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * parent_context, RECDES * recdes_p,
20209  OID * out_oid_p, PGBUF_WATCHER * newhome_pg_watcher)
20210 {
20211  HEAP_OPERATION_CONTEXT ins_context;
20212  int error_code = NO_ERROR;
20213 
20214  /* check input */
20215  assert (recdes_p != NULL);
20216  assert (parent_context != NULL);
20217  assert (parent_context->type == HEAP_OPERATION_DELETE || parent_context->type == HEAP_OPERATION_UPDATE);
20218 
20219  /* build insert context */
20220  heap_create_insert_context (&ins_context, &parent_context->hfid, &parent_context->class_oid, recdes_p, NULL);
20221 
20222  /* physical insertion */
20223  error_code = heap_find_location_and_insert_rec_newhome (thread_p, &ins_context);
20224  if (error_code != NO_ERROR)
20225  {
20226  ASSERT_ERROR ();
20227  return error_code;
20228  }
20229 
20230  HEAP_PERF_TRACK_EXECUTE (thread_p, parent_context);
20231 
20232  /* log operation */
20233 
20234  /* This is a relocation of existing record, be it deleted or updated. Vacuum is not supposed to be notified since he
20235  * never check REC_NEWHOME type records. An MVCC type logging is not required here, a simple RVHF_INSERT will do. */
20236  heap_log_insert_physical (thread_p, ins_context.home_page_watcher_p->pgptr, &ins_context.hfid.vfid,
20237  &ins_context.res_oid, ins_context.recdes_p, false, false);
20238 
20239  HEAP_PERF_TRACK_LOGGING (thread_p, parent_context);
20240 
20241  /* advertise insert location */
20242  if (out_oid_p != NULL)
20243  {
20244  COPY_OID (out_oid_p, &ins_context.res_oid);
20245  }
20246 
20247  /* mark insert page as dirty */
20248  pgbuf_set_dirty (thread_p, ins_context.home_page_watcher_p->pgptr, DONT_FREE);
20249 
20250  if (newhome_pg_watcher != NULL)
20251  {
20252  /* keep the page watcher, necessary for heap_update_set_prev_version() */
20253  pgbuf_replace_watcher (thread_p, ins_context.home_page_watcher_p, newhome_pg_watcher);
20254  }
20255 
20256  /* unfix all pages of insert context */
20257  heap_unfix_watchers (thread_p, &ins_context);
20258  /* all ok */
20259  return NO_ERROR;
20260 }
20261 
20262 /*
20263  * heap_insert_physical () - physical insert into heap page
20264  * thread_p(in): thread entry
20265  * context(in): operation context
20266  * is_mvcc_op(in): MVCC or non-MVCC operation
20267  *
20268  * NOTE: This function should receive a fixed page and a location in res_oid,
20269  * where the context->recdes_p will go in.
20270  */
static int
heap_insert_physical (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
{
  /* check input */
  assert (context != NULL);
  assert (context->type == HEAP_OPERATION_INSERT);
  assert (context->recdes_p != NULL);
  assert (context->home_page_watcher_p->pgptr != NULL);

  /* assume we have the exact location for insert as well as a fixed page */
  assert (context->res_oid.volid != NULL_VOLID);
  assert (context->res_oid.pageid != NULL_PAGEID);
  assert (context->res_oid.slotid != NULL_SLOTID);

#if defined(CUBRID_DEBUG)
  /* function should have received map record if input record was multipage */
  if (heap_is_big_length (context->recdes_p->length))
    {
      /* NOTE(review): the leading er_log_debug (ARG_FILE_LINE, ...) call of this
       * diagnostic appears lost in extraction -- only its argument continuation
       * lines remain below; verify against the original file. */
		    "heap_insert_internal: This function does not accept"
		    " objects longer than %d. An object of %d was given\n", heap_Maxslotted_reclength, recdes->length);
      return ER_FAILED;
    }

  /* check we're inserting in a page of desired class */
  if (!OID_ISNULL (&context->class_oid))
    {
      OID heap_class_oid;
      int rc;

      rc = heap_get_class_oid_from_page (thread_p, context->home_page_watcher_p->pgptr, &heap_class_oid);
      assert (rc == NO_ERROR);
      assert (OID_EQ (&heap_class_oid, &context->class_oid));
    }
#endif

  /* physical insertion at the pre-reserved slot */
  if (spage_insert_at (thread_p, context->home_page_watcher_p->pgptr, context->res_oid.slotid, context->recdes_p) !=
      SP_SUCCESS)
    {
      /* NOTE(review): a statement (likely an assertion or error report on the failed
       * slotted-page insert) appears lost in extraction -- verify. */
      OID_SET_NULL (&context->res_oid);
      return ER_FAILED;
    }

  /* all ok */
  return NO_ERROR;
}
20319 
20320 /*
20321  * heap_log_insert_physical () - add logging information for physical insertion
20322  * thread_p(in): thread entry
20323  * page_p(in): page where insert was performed
20324  * vfid_p(in): virtual file id
20325  * oid_p(in): newly inserted object id
20326  * recdes_p(in): record descriptor of inserted record
20327  * is_mvcc_op(in): specifies type of operation (MVCC/non-MVCC)
20328  * is_redistribute_op(in): whether the insertion is due to partition
20329  * redistribute operation and has a valid delid
20330  */
20331 static void
20332 heap_log_insert_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, VFID * vfid_p, OID * oid_p, RECDES * recdes_p,
20333  bool is_mvcc_op, bool is_redistribute_op)
20334 {
20335  LOG_DATA_ADDR log_addr;
20336 
20337  /* populate address field */
20338  log_addr.vfid = vfid_p;
20339  log_addr.offset = oid_p->slotid;
20340  log_addr.pgptr = page_p;
20341 
20342  if (is_mvcc_op)
20343  {
20344  if (is_redistribute_op)
20345  {
20346  /* this is actually a deleted record, inserted due to a PARTITION reorganize operation. Log this operation
20347  * separately */
20348  heap_mvcc_log_redistribute (thread_p, recdes_p, &log_addr);
20349  }
20350  else
20351  {
20352  /* MVCC logging */
20353  heap_mvcc_log_insert (thread_p, recdes_p, &log_addr);
20354  }
20355  }
20356  else
20357  {
20358  INT16 bytes_reserved;
20359  RECDES temp_recdes;
20360 
20361  if (recdes_p->type == REC_ASSIGN_ADDRESS)
20362  {
20363  /* special case for REC_ASSIGN */
20364  temp_recdes.type = recdes_p->type;
20365  temp_recdes.area_size = sizeof (bytes_reserved);
20366  temp_recdes.length = sizeof (bytes_reserved);
20367  bytes_reserved = (INT16) recdes_p->length;
20368  temp_recdes.data = (char *) &bytes_reserved;
20369  log_append_undoredo_recdes (thread_p, RVHF_INSERT, &log_addr, NULL, &temp_recdes);
20370  }
20371  else if (recdes_p->type == REC_NEWHOME)
20372  {
20373  /* replication for REC_NEWHOME is performed by following the link (OID) from REC_RELOCATION */
20374  log_append_undoredo_recdes (thread_p, RVHF_INSERT_NEWHOME, &log_addr, NULL, recdes_p);
20375  }
20376  else
20377  {
20378  log_append_undoredo_recdes (thread_p, RVHF_INSERT, &log_addr, NULL, recdes_p);
20379  }
20380  }
20381 }
20382 
20383 /*
20384  * heap_delete_adjust_header () - adjust MVCC record header for delete operation
20385  *
20386  * header_p(in): MVCC record header
20387  * mvcc_id(in): MVCC identifier
20388  * need_mvcc_header_max_size(in): true, if need maximum size for MVCC header
20389  *
20390  * NOTE: Only applicable for MVCC operations.
20391  */
static void
heap_delete_adjust_header (MVCC_REC_HEADER * header_p, MVCCID mvcc_id, bool need_mvcc_header_max_size)
{
  assert (header_p != NULL);

  /* NOTE(review): a statement (likely setting the OR_MVCC_FLAG_VALID_DELID flag on the
   * header before storing the delete id) appears lost in extraction -- verify against
   * the original file. */
  MVCC_SET_DELID (header_p, mvcc_id);

  if (need_mvcc_header_max_size)
    {
      /* set maximum MVCC header size */
      /* NOTE(review): the statement performing this (a set-maximum-header-size macro)
       * appears lost in extraction -- verify. */
    }
}
20406 
20407 /*
20408  * heap_get_delete_location () - find the desired object and fix the page
20409  * thread_p(in): thread entry
20410  * context(in): delete operation context
20411  * return: error code or NO_ERROR
20412  */
static int
heap_get_record_location (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
{
  VPID vpid;

  /* check input */
  assert (context != NULL);
  assert (!OID_ISNULL (&context->oid));
  assert (!HFID_IS_NULL (&context->hfid));

  /* get vpid from object */
  vpid.pageid = context->oid.pageid;
  vpid.volid = context->oid.volid;

  /* first try to retrieve cached fixed page from scancache */
  if (context->scan_cache_p != NULL && context->scan_cache_p->page_watcher.pgptr != NULL
      && context->scan_cache_p->cache_last_fix_page == true)
    {
      VPID *vpid_incache_p = pgbuf_get_vpid_ptr (context->scan_cache_p->page_watcher.pgptr);

      if (VPID_EQ (&vpid, vpid_incache_p))
	{
	  /* we can get it from the scancache: transfer ownership of the fix */
	  pgbuf_replace_watcher (thread_p, &context->scan_cache_p->page_watcher, context->home_page_watcher_p);
	}
      else
	{
	  /* last scancache fixed page is not desired page */
	  pgbuf_ordered_unfix (thread_p, &context->scan_cache_p->page_watcher);
	}
      /* either way, the scancache no longer holds a page */
      assert (context->scan_cache_p->page_watcher.pgptr == NULL);
    }

  /* if scancache page was not suitable, fix desired page */
  if (context->home_page_watcher_p->pgptr == NULL)
    {
      (void) heap_scan_pb_lock_and_fetch (thread_p, &vpid, OLD_PAGE, X_LOCK, context->scan_cache_p,
					  context->home_page_watcher_p);
      if (context->home_page_watcher_p->pgptr == NULL)
	{
	  int rc;

	  if (er_errid () == ER_PB_BAD_PAGEID)
	    {
	      /* NOTE(review): the leading er_set (...ER_HEAP_UNKNOWN_OBJECT...) call of
	       * this error report appears lost in extraction -- only its argument
	       * continuation line remains below; verify against the original file. */
			 context->oid.pageid, context->oid.slotid);
	    }

	  /* something went wrong, return */
	  ASSERT_ERROR_AND_SET (rc);
	  return rc;
	}
    }

#if !defined(NDEBUG)
  if (context->scan_cache_p != NULL)
    {
      OID heap_class_oid;

      /* sanity: the fixed page must belong to the scancache's class, unless the
       * scancache is unbound and the slot holds a REC_ASSIGN_ADDRESS */
      assert (heap_get_class_oid_from_page (thread_p, context->home_page_watcher_p->pgptr, &heap_class_oid) ==
	      NO_ERROR);
      /* NOTE(review): part of the second disjunct (likely a spage_get_record_type call)
       * appears lost in extraction -- only its argument continuation remains; verify. */
      assert ((OID_EQ (&heap_class_oid, &context->scan_cache_p->node.class_oid))
	      || (OID_ISNULL (&context->scan_cache_p->node.class_oid)
		  context->oid.slotid) == REC_ASSIGN_ADDRESS));
    }
#endif

  /* all ok */
  return NO_ERROR;
}
20484 
20485 /*
20486  * heap_delete_bigone () - delete a REC_BIGONE record
20487  * thread_p(in): thread entry
20488  * context(in): operation context
20489  * is_mvcc_op(in): specifies type of operation (MVCC/non-MVCC)
20490  */
static int
heap_delete_bigone (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op)
{
  OID overflow_oid;
  int rc;

  /* check input */
  assert (context != NULL);
  assert (context->type == HEAP_OPERATION_DELETE);
  assert (context->home_recdes.data != NULL);
  assert (context->home_page_watcher_p != NULL);
  assert (context->home_page_watcher_p->pgptr != NULL);
  assert (context->overflow_page_watcher_p != NULL);
  assert (context->overflow_page_watcher_p->pgptr == NULL);

  /* MVCC info is in overflow page, we only keep an OID in home */
  overflow_oid = *((OID *) context->home_recdes.data);

  /* reset overflow watcher rank */
  /* NOTE(review): the statement performing this (a watcher-rank reset macro) appears
   * lost in extraction -- verify against the original file. */

  if (is_mvcc_op)
    {
      /* MVCC delete: mark the overflow record deleted instead of removing it;
       * vacuum removes the data later. */
      MVCC_REC_HEADER overflow_header;
      VPID overflow_vpid;
      LOG_DATA_ADDR log_addr;
      MVCCID mvcc_id = logtb_get_current_mvccid (thread_p);

      /* fix overflow page */
      overflow_vpid.pageid = overflow_oid.pageid;
      overflow_vpid.volid = overflow_oid.volid;
      /* NOTE(review): a statement (likely copying the watcher group/rank from the home
       * watcher) appears lost in extraction -- verify. */
      rc = pgbuf_ordered_fix (thread_p, &overflow_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, context->overflow_page_watcher_p);
      if (rc != NO_ERROR)
	{
	  if (rc == ER_LK_PAGE_TIMEOUT && er_errid () == NO_ERROR)
	    {
	      /* NOTE(review): the leading er_set (...) call of this error report appears
	       * lost in extraction -- only its argument continuation line remains below;
	       * verify against the original file. */
			 overflow_vpid.pageid);
	    }
	  return rc;
	}

      /* check overflow page type */
      (void) pgbuf_check_page_ptype (thread_p, context->overflow_page_watcher_p->pgptr, PAGE_OVERFLOW);

      /* fetch header from overflow */
      if (heap_get_mvcc_rec_header_from_overflow (context->overflow_page_watcher_p->pgptr, &overflow_header, NULL) !=
	  NO_ERROR)
	{
	  return ER_FAILED;
	}
      /* NOTE(review): a line (possibly a sanity assertion on the fetched header)
       * appears lost in extraction here -- verify. */

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);

      /* log operation BEFORE modifying the page (write-ahead logging) */
      log_addr.pgptr = context->overflow_page_watcher_p->pgptr;
      log_addr.vfid = &context->hfid.vfid;
      log_addr.offset = overflow_oid.slotid;
      heap_mvcc_log_delete (thread_p, &log_addr, RVHF_MVCC_DELETE_OVERFLOW);

      HEAP_PERF_TRACK_LOGGING (thread_p, context);

      /* adjust header; we don't care to make header max size since it's already done */
      heap_delete_adjust_header (&overflow_header, mvcc_id, false);

      /* write header to overflow */
      rc = heap_set_mvcc_rec_header_on_overflow (context->overflow_page_watcher_p->pgptr, &overflow_header);
      if (rc != NO_ERROR)
	{
	  return rc;
	}

      /* set page as dirty */
      pgbuf_set_dirty (thread_p, context->overflow_page_watcher_p->pgptr, DONT_FREE);

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);

      /* Home record is not changed, but page max MVCCID and vacuum status have to change. Also vacuum needs to be
       * vacuum with the location of home record (REC_RELOCATION). */
      log_addr.vfid = &context->hfid.vfid;
      log_addr.pgptr = context->home_page_watcher_p->pgptr;
      log_addr.offset = context->oid.slotid;
      heap_mvcc_log_home_no_change (thread_p, &log_addr);

      pgbuf_set_dirty (thread_p, context->home_page_watcher_p->pgptr, DONT_FREE);

      HEAP_PERF_TRACK_LOGGING (thread_p, context);

      /* NOTE(review): a line appears lost in extraction at the end of this branch --
       * verify against the original file. */
    }
  else
    {
      /* non-MVCC delete: physically remove both home and overflow records */
      bool is_reusable = heap_is_reusable_oid (context->file_type);

      /* fix header page */
      rc = heap_fix_header_page (thread_p, context);
      if (rc != NO_ERROR)
	{
	  return rc;
	}

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);

      if (context->home_page_watcher_p->page_was_unfixed)
	{
	  /*
	   * Need to get the record again, since record may have changed
	   * by other transactions (INSID removed by VACUUM, page compact).
	   * The object was already locked, so the record size may be the
	   * same or smaller (INSID removed by VACUUM).
	   */
	  int is_peeking = (context->home_recdes.area_size >= context->home_recdes.length) ? COPY : PEEK;
	  if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
				&context->home_recdes, is_peeking) != S_SUCCESS)
	    {
	      return ER_FAILED;
	    }
	}

      /* log operation BEFORE modifying the page (write-ahead logging) */
      heap_log_delete_physical (thread_p, context->home_page_watcher_p->pgptr, &context->hfid.vfid, &context->oid,
				&context->home_recdes, is_reusable, NULL);

      HEAP_PERF_TRACK_LOGGING (thread_p, context);

      /* physical deletion of home record */
      rc = heap_delete_physical (thread_p, &context->hfid, context->home_page_watcher_p->pgptr, &context->oid);
      if (rc != NO_ERROR)
	{
	  return rc;
	}

      /* physical deletion of overflow record */
      if (heap_ovf_delete (thread_p, &context->hfid, &overflow_oid, NULL) == NULL)
	{
	  return ER_FAILED;
	}

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);

      /* NOTE(review): a line appears lost in extraction at the end of this branch --
       * verify against the original file. */
    }

  /* all ok */
  return NO_ERROR;
}
20639 
20640 /*
20641  * heap_delete_relocation () - delete a REC_RELOCATION record
20642  * thread_p(in): thread entry
20643  * context(in): operation context
20644  * is_mvcc_op(in): specifies type of operation (MVCC/non-MVCC)
20645  * returns: error code or NO_ERROR
20646  */
static int
heap_delete_relocation (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op)
{
  RECDES forward_recdes;	/* peek of the actual data record (REC_NEWHOME) on the forward page */
  OID forward_oid;		/* location of the forward (REC_NEWHOME) record */
  int rc;

  /* check input */
  assert (context != NULL);
  assert (context->type == HEAP_OPERATION_DELETE);
  assert (context->record_type == REC_RELOCATION);
  assert (context->home_page_watcher_p != NULL);
  assert (context->home_page_watcher_p->pgptr != NULL);
  assert (context->forward_page_watcher_p != NULL);

  /* get forward oid; a REC_RELOCATION home record stores only the OID of the relocated record */
  forward_oid = *((OID *) context->home_recdes.data);

  /* fix forward page */
  if (heap_fix_forward_page (thread_p, context, &forward_oid) != NO_ERROR)
    {
      return ER_FAILED;
    }

  /* get forward record */
  if (spage_get_record (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid, &forward_recdes, PEEK) !=
      S_SUCCESS)
    {
      return ER_FAILED;
    }

  HEAP_PERF_TRACK_PREPARE (thread_p, context);

  if (is_mvcc_op)
    {
      /* MVCC delete: the record is not physically removed; it is rewritten with the deleter's MVCCID (DELID)
       * so other snapshots can still see the old version until vacuum cleans it. */
      RECDES new_forward_recdes, new_home_recdes;
      MVCC_REC_HEADER forward_rec_header;
      MVCCID mvcc_id = logtb_get_current_mvccid (thread_p);
      /* NOTE(review): a declaration line (apparently the record data buffer used below via PTR_ALIGN (buffer, ...))
       * is missing from this excerpt -- confirm against the repository. */
      OID new_forward_oid;
      int adjusted_size;
      bool fits_in_home, fits_in_forward;
      bool update_old_home = false;
      bool update_old_forward = false;
      bool remove_old_forward = false;
      bool is_adjusted_size_big = false;
      int delid_offset, repid_and_flag_bits, mvcc_flags;
      char *build_recdes_data;
      bool use_optimization;

      repid_and_flag_bits = OR_GET_MVCC_REPID_AND_FLAG (forward_recdes.data);
      mvcc_flags = (repid_and_flag_bits >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK;
      adjusted_size = forward_recdes.length;

      /*
       * Uses the optimization in most common cases, for now : if DELID not set and adjusted size is not big size.
       * Decide whether the deleted record has big size from beginning. After fixing header page, it may be possible
       * that the deleted record to not have big size. Since is a very rare case, don't care to optimize this case.
       */
      use_optimization = true;
      if (!(mvcc_flags & OR_MVCC_FLAG_VALID_DELID))
	{
	  /* DELID must be appended to the MVCC header, so the record grows by one MVCCID. */
	  adjusted_size += OR_MVCCID_SIZE;
	  is_adjusted_size_big = heap_is_big_length (adjusted_size);
	  if (is_adjusted_size_big)
	    {
	      /* Rare case, do not optimize it now. */
	      use_optimization = false;
	    }
	}
      else
	{
	  /* Rare case, do not optimize it now. */
	  is_adjusted_size_big = false;
	  use_optimization = false;
	}

#if !defined(NDEBUG)
      if (is_adjusted_size_big)
	{
	  /* not exactly necessary, but we'll be able to compare sizes */
	  adjusted_size = forward_recdes.length - mvcc_header_size_lookup[mvcc_flags] + OR_MVCC_MAX_HEADER_SIZE;
	}
#endif

      /* fix header if necessary */
      fits_in_home =
	spage_is_updatable (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid, adjusted_size);
      fits_in_forward =
	spage_is_updatable (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid, adjusted_size);
      if (is_adjusted_size_big || (!fits_in_forward && !fits_in_home))
	{
	  /* fix header page */
	  rc = heap_fix_header_page (thread_p, context);
	  if (rc != NO_ERROR)
	    {
	      return ER_FAILED;
	    }

	  /* NOTE(review): the guard line before this block is missing from this excerpt; presumably
	   * `if (context->forward_page_watcher_p->page_was_unfixed)` -- confirm against the repository. */
	  {
	    /* re-peek forward record descriptor; forward page may have been unfixed by previous pgbuf_ordered_fix()
	     * call */
	    if (spage_get_record (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid,
				  &forward_recdes, PEEK) != S_SUCCESS)
	      {
		return ER_FAILED;
	      }

	    /* Recomputes the header size, do not recomputes is_adjusted_size_big. */
	    repid_and_flag_bits = OR_GET_MVCC_REPID_AND_FLAG (forward_recdes.data);
	    if (mvcc_flags != ((repid_and_flag_bits >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK))
	      {
		/* Rare case - disable optimization, in case that the flags was modified meanwhile. */
		mvcc_flags = (repid_and_flag_bits >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK;
		use_optimization = false;

#if !defined(NDEBUG)
		if (is_adjusted_size_big)
		  {
		    /* not exactly necessary, but we'll be able to compare sizes */
		    /* NOTE(review): the continuation of this statement (apparently `+ OR_MVCC_MAX_HEADER_SIZE;`)
		     * is missing from this excerpt. */
		    adjusted_size = forward_recdes.length - mvcc_header_size_lookup[mvcc_flags]
		  }
#endif
	      }
	  }
	}

      /* Build the new record. */
      /* NOTE(review): the first part of this statement (apparently a HEAP_SET_RECORD initializing
       * new_forward_recdes over a stack buffer) is missing from this excerpt. */
			REC_UNKNOWN, PTR_ALIGN (buffer, MAX_ALIGNMENT));
      if (use_optimization)
	{
	  char *start_p;

	  delid_offset = OR_MVCC_DELETE_ID_OFFSET (mvcc_flags);
	  build_recdes_data = start_p = new_forward_recdes.data;

	  /* Copy up to MVCC DELID first. */
	  memcpy (build_recdes_data, forward_recdes.data, delid_offset);
	  build_recdes_data += delid_offset;

	  /* Sets MVCC DELID flag, overwrite first four bytes. */
	  repid_and_flag_bits |= (OR_MVCC_FLAG_VALID_DELID << OR_MVCC_FLAG_SHIFT_BITS);
	  OR_PUT_INT (start_p, repid_and_flag_bits);

	  /* Sets the MVCC DELID. */
	  OR_PUT_BIGINT (build_recdes_data, &mvcc_id);
	  build_recdes_data += OR_MVCCID_SIZE;

	  /* Copy remaining data. */
#if !defined(NDEBUG)
	  if (mvcc_flags & OR_MVCC_FLAG_VALID_PREV_VERSION)
	    {
	      /* Check that we need to copy from offset of LOG LSA up to the end of the buffer. */
	      assert (delid_offset == OR_MVCC_PREV_VERSION_LSA_OFFSET (mvcc_flags));
	    }
	  else
	    {
	      /* Check that we need to copy from end of MVCC header up to the end of the buffer. */
	      assert (delid_offset == mvcc_header_size_lookup[mvcc_flags]);
	    }
#endif

	  memcpy (build_recdes_data, forward_recdes.data + delid_offset, forward_recdes.length - delid_offset);
	  new_forward_recdes.length = adjusted_size;
	}
      else
	{
	  int forward_rec_header_size;
	  /*
	   * Rare case - don't care to optimize it for now. Get the MVCC header, build adjusted record
	   * header - slow operation.
	   */
	  if (or_mvcc_get_header (&forward_recdes, &forward_rec_header) != NO_ERROR)
	    {
	      return ER_FAILED;
	    }
	  assert (forward_rec_header.mvcc_flag == mvcc_flags);
	  heap_delete_adjust_header (&forward_rec_header, mvcc_id, is_adjusted_size_big);
	  or_mvcc_add_header (&new_forward_recdes, &forward_rec_header, OR_GET_BOUND_BIT_FLAG (forward_recdes.data),
			      OR_GET_OFFSET_SIZE (forward_recdes.data));

	  forward_rec_header_size = mvcc_header_size_lookup[mvcc_flags];
	  memcpy (new_forward_recdes.data + new_forward_recdes.length, forward_recdes.data + forward_rec_header_size,
		  forward_recdes.length - forward_rec_header_size);
	  new_forward_recdes.length += forward_recdes.length - forward_rec_header_size;
	  assert (new_forward_recdes.length == adjusted_size);
	}

      /* determine what operations on home/forward pages are necessary and execute extra operations for each case */
      if (is_adjusted_size_big)
	{
	  /* insert new overflow record */
	  if (heap_ovf_insert (thread_p, &context->hfid, &new_forward_oid, &new_forward_recdes) == NULL)
	    {
	      return ER_FAILED;
	    }

	  /* home record descriptor will be an overflow OID and will be placed in original home page */
	  heap_build_forwarding_recdes (&new_home_recdes, REC_BIGONE, &new_forward_oid);

	  /* remove old forward record */
	  remove_old_forward = true;
	  update_old_home = true;

	  /* NOTE(review): a line (apparently a perfmon/statistics counter) is missing from this excerpt. */
	}
      else if (fits_in_home)
	{
	  /* updated forward record fits in home page */
	  new_home_recdes = new_forward_recdes;
	  new_home_recdes.type = REC_HOME;

	  /* clear forward rebuild_record (just to be safe) */
	  new_forward_recdes.area_size = 0;
	  new_forward_recdes.length = 0;
	  new_forward_recdes.type = REC_UNKNOWN;
	  new_forward_recdes.data = NULL;

	  /* remove old forward record */
	  remove_old_forward = true;
	  update_old_home = true;

	  /* NOTE(review): a line (apparently a perfmon/statistics counter) is missing from this excerpt. */
	}
      else if (fits_in_forward)
	{
	  /* updated forward record fits in old forward page */
	  new_forward_recdes.type = REC_NEWHOME;

	  /* home record will not be touched */
	  update_old_forward = true;

	  /* NOTE(review): a line (apparently a perfmon/statistics counter) is missing from this excerpt. */
	}
      else
	{
	  /* doesn't fit in either home or forward page */
	  /* insert a new forward record */
	  new_forward_recdes.type = REC_NEWHOME;
	  rc = heap_insert_newhome (thread_p, context, &new_forward_recdes, &new_forward_oid, NULL);
	  if (rc != NO_ERROR)
	    {
	      return rc;
	    }

	  /* new home record will be a REC_RELOCATION and will be placed in the original home page */
	  heap_build_forwarding_recdes (&new_home_recdes, REC_RELOCATION, &new_forward_oid);

	  /* remove old forward record */
	  remove_old_forward = true;
	  update_old_home = true;

	  /* NOTE(review): a line (apparently a perfmon/statistics counter) is missing from this excerpt. */
	}

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);

      /*
       * Update old home record (if necessary)
       */
      if (update_old_home)
	{
	  LOG_DATA_ADDR home_addr;

	  if (context->home_page_watcher_p->page_was_unfixed)
	    {
	      /*
	       * Need to get the record again, since record may have changed
	       * by other transactions (INSID removed by VACUUM, page compact).
	       * The object was already locked, so the record size may be the
	       * same or smaller (INSID removed by VACUUM).
	       */
	      int is_peeking = (context->home_recdes.area_size >= context->home_recdes.length) ? COPY : PEEK;
	      if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
				    &context->home_recdes, is_peeking) != S_SUCCESS)
		{
		  return ER_FAILED;
		}
	    }

	  /* log operation */
	  home_addr.vfid = &context->hfid.vfid;
	  home_addr.pgptr = context->home_page_watcher_p->pgptr;
	  home_addr.offset = context->oid.slotid;

	  heap_mvcc_log_home_change_on_delete (thread_p, &context->home_recdes, &new_home_recdes, &home_addr);

	  HEAP_PERF_TRACK_LOGGING (thread_p, context);

	  /* update home record */
	  rc = heap_update_physical (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
				     &new_home_recdes);
	  if (rc != NO_ERROR)
	    {
	      return rc;
	    }

	  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
	}
      else
	{
	  /* Home record is not changed, but page max MVCCID and vacuum status have to change. Also vacuum needs to be
	   * vacuum with the location of home record (REC_BIGONE). */
	  LOG_DATA_ADDR home_addr;

	  /* log operation */
	  home_addr.vfid = &context->hfid.vfid;
	  home_addr.pgptr = context->home_page_watcher_p->pgptr;
	  home_addr.offset = context->oid.slotid;
	  heap_mvcc_log_home_no_change (thread_p, &home_addr);
	  pgbuf_set_dirty (thread_p, context->home_page_watcher_p->pgptr, DONT_FREE);

	  HEAP_PERF_TRACK_LOGGING (thread_p, context);
	}

      /*
       * Update old forward record (if necessary)
       */
      if (update_old_forward)
	{
	  LOG_DATA_ADDR forward_addr;

	  /* log operation */
	  forward_addr.vfid = &context->hfid.vfid;
	  forward_addr.pgptr = context->forward_page_watcher_p->pgptr;
	  forward_addr.offset = forward_oid.slotid;
	  heap_mvcc_log_delete (thread_p, &forward_addr, RVHF_MVCC_DELETE_REC_NEWHOME);

	  HEAP_PERF_TRACK_LOGGING (thread_p, context);

	  /* physical update of forward record */
	  rc =
	    heap_update_physical (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid,
				  &new_forward_recdes);
	  if (rc != NO_ERROR)
	    {
	      return rc;
	    }

	  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
	}

      /*
       * Delete old forward record (if necessary)
       */
      if (remove_old_forward)
	{
	  LOG_DATA_ADDR forward_addr;

	  /* re-peek forward record descriptor; forward page may have been unfixed by previous pgbuf_ordered_fix() call
	   */
	  /* NOTE(review): the guard line before this block is missing from this excerpt; presumably
	   * `if (context->forward_page_watcher_p->page_was_unfixed)` -- confirm against the repository. */
	  {
	    if (spage_get_record (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid,
				  &forward_recdes, PEEK) != S_SUCCESS)
	      {
		return ER_FAILED;
	      }
	  }

	  /* operation logging */
	  forward_addr.vfid = &context->hfid.vfid;
	  forward_addr.pgptr = context->forward_page_watcher_p->pgptr;
	  forward_addr.offset = forward_oid.slotid;

	  log_append_undoredo_recdes (thread_p, RVHF_DELETE, &forward_addr, &forward_recdes, NULL);
	  if (heap_is_reusable_oid (context->file_type))
	    {
	      log_append_postpone (thread_p, RVHF_MARK_REUSABLE_SLOT, &forward_addr, 0, NULL);
	    }

	  HEAP_PERF_TRACK_LOGGING (thread_p, context);

	  /* physical removal of forward record */
	  rc = heap_delete_physical (thread_p, &context->hfid, context->forward_page_watcher_p->pgptr, &forward_oid);
	  if (rc != NO_ERROR)
	    {
	      return rc;
	    }

	  HEAP_PERF_TRACK_EXECUTE (thread_p, context);
	}
    }
  else
    {
      /* non-MVCC delete: physically remove both the home (REC_RELOCATION) and forward (REC_NEWHOME) records */
      bool is_reusable = heap_is_reusable_oid (context->file_type);

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);

      if (context->home_page_watcher_p->page_was_unfixed)
	{
	  /*
	   * Need to get the record again, since record may have changed
	   * by other transactions (INSID removed by VACUUM, page compact).
	   * The object was already locked, so the record size may be the
	   * same or smaller (INSID removed by VACUUM).
	   */
	  int is_peeking = (context->home_recdes.area_size >= context->home_recdes.length) ? COPY : PEEK;
	  if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
				&context->home_recdes, is_peeking) != S_SUCCESS)
	    {
	      return ER_FAILED;
	    }
	}
      /*
       * Delete home record
       */

      heap_log_delete_physical (thread_p, context->home_page_watcher_p->pgptr, &context->hfid.vfid, &context->oid,
				&context->home_recdes, is_reusable, NULL);

      HEAP_PERF_TRACK_LOGGING (thread_p, context);

      /* physical deletion of home record */
      rc = heap_delete_physical (thread_p, &context->hfid, context->home_page_watcher_p->pgptr, &context->oid);
      if (rc != NO_ERROR)
	{
	  return rc;
	}

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);

      /* NOTE(review): the guard line before this block is missing from this excerpt; presumably
       * `if (context->forward_page_watcher_p->page_was_unfixed)` -- confirm against the repository. */
      {
	/* re-peek forward record descriptor; forward page may have been unfixed by previous pgbuf_ordered_fix() call
	 */
	if (spage_get_record (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid,
			      &forward_recdes, PEEK) != S_SUCCESS)
	  {
	    return ER_FAILED;
	  }
      }
      /*
       * Delete forward record
       */
      /*
       * It should be safe to mark the new home slot as reusable regardless
       * of the heap type (reusable OID or not) as the relocated record
       * should not be referenced anywhere in the database.
       */
      heap_log_delete_physical (thread_p, context->forward_page_watcher_p->pgptr, &context->hfid.vfid, &forward_oid,
				&forward_recdes, true, NULL);

      HEAP_PERF_TRACK_LOGGING (thread_p, context);

      /* physical deletion of forward record */
      rc = heap_delete_physical (thread_p, &context->hfid, context->forward_page_watcher_p->pgptr, &forward_oid);
      if (rc != NO_ERROR)
	{
	  return rc;
	}

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);

      /* NOTE(review): a line (apparently a perfmon/statistics counter) is missing from this excerpt. */
    }

  /* all ok */
  return NO_ERROR;
}
21110 
21111 /*
21112  * heap_delete_home () - delete a REC_HOME (or REC_ASSIGN_ADDRESS) record
21113  * thread_p(in): thread entry
21114  * context(in): operation context
21115  * is_mvcc_op(in): specifies type of operation (MVCC/non-MVCC)
21116  * returns: error code or NO_ERROR
21117  */
static int
heap_delete_home (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op)
{
  int error_code = NO_ERROR;

  /* check input */
  assert (context != NULL);
  assert (context->record_type == REC_HOME || context->record_type == REC_ASSIGN_ADDRESS);
  assert (context->type == HEAP_OPERATION_DELETE);
  assert (context->home_page_watcher_p != NULL);
  assert (context->home_page_watcher_p->pgptr != NULL);

  if (context->home_page_watcher_p->page_was_unfixed)
    {
      /*
       * Need to get the record again, since record may have changed
       * by other transactions (INSID removed by VACUUM, page compact).
       * The object was already locked, so the record size may be the
       * same or smaller (INSID removed by VACUUM).
       */
      int is_peeking = (context->home_recdes.area_size >= context->home_recdes.length) ? COPY : PEEK;
      if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
			    &context->home_recdes, is_peeking) != S_SUCCESS)
	{
	  assert (false);
	  return ER_FAILED;
	}
    }

  /* operation */
  if (is_mvcc_op)
    {
      /* MVCC delete: rebuild the record with the deleter's MVCCID (DELID) set; relocate the rebuilt record
       * (REC_NEWHOME or REC_BIGONE) if it no longer fits in the home page. */
      MVCC_REC_HEADER record_header;
      RECDES built_recdes;
      RECDES forwarding_recdes;
      RECDES *home_page_updated_recdes;
      OID forward_oid;
      MVCCID mvcc_id = logtb_get_current_mvccid (thread_p);
      /* NOTE(review): a declaration line (apparently the data_buffer used below via PTR_ALIGN (data_buffer, ...))
       * is missing from this excerpt -- confirm against the repository. */
      int adjusted_size;
      bool is_adjusted_size_big = false;
      int delid_offset, repid_and_flag_bits, mvcc_flags;
      char *build_recdes_data;
      bool use_optimization;

      /* Build the new record descriptor. */
      repid_and_flag_bits = OR_GET_MVCC_REPID_AND_FLAG (context->home_recdes.data);
      mvcc_flags = (repid_and_flag_bits >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK;
      adjusted_size = context->home_recdes.length;

      /* Uses the optimization in most common cases, for now : if DELID not set and adjusted size is not big size. */
      use_optimization = true;
      if (!(mvcc_flags & OR_MVCC_FLAG_VALID_DELID))
	{
	  /* DELID must be appended to the MVCC header, so the record grows by one MVCCID. */
	  adjusted_size += OR_MVCCID_SIZE;
	  is_adjusted_size_big = heap_is_big_length (adjusted_size);
	  if (is_adjusted_size_big)
	    {
	      /* Rare case, do not optimize it now. */
	      use_optimization = false;
	    }
	}
      else
	{
	  /* Rare case, do not optimize it now. */
	  is_adjusted_size_big = false;
	  use_optimization = false;
	}

#if !defined(NDEBUG)
      if (is_adjusted_size_big)
	{
	  /* not exactly necessary, but we'll be able to compare sizes */
	  adjusted_size = context->home_recdes.length - mvcc_header_size_lookup[mvcc_flags] + OR_MVCC_MAX_HEADER_SIZE;
	}
#endif

      /* Build the new record. */
      /* NOTE(review): the first part of this statement (apparently a HEAP_SET_RECORD initializing built_recdes
       * over data_buffer) is missing from this excerpt. */
		       PTR_ALIGN (data_buffer, MAX_ALIGNMENT));
      if (use_optimization)
	{
	  char *start_p;

	  delid_offset = OR_MVCC_DELETE_ID_OFFSET (mvcc_flags);

	  build_recdes_data = start_p = built_recdes.data;

	  /* Copy up to MVCC DELID first. */
	  memcpy (build_recdes_data, context->home_recdes.data, delid_offset);
	  build_recdes_data += delid_offset;

	  /* Sets MVCC DELID flag, overwrite first four bytes. */
	  repid_and_flag_bits |= (OR_MVCC_FLAG_VALID_DELID << OR_MVCC_FLAG_SHIFT_BITS);
	  OR_PUT_INT (start_p, repid_and_flag_bits);

	  /* Sets the MVCC DELID. */
	  OR_PUT_BIGINT (build_recdes_data, &mvcc_id);
	  build_recdes_data += OR_MVCC_DELETE_ID_SIZE;

	  /* Copy remaining data. */
#if !defined(NDEBUG)
	  if (mvcc_flags & OR_MVCC_FLAG_VALID_PREV_VERSION)
	    {
	      /* Check that we need to copy from offset of LOG LSA up to the end of the buffer. */
	      assert (delid_offset == OR_MVCC_PREV_VERSION_LSA_OFFSET (mvcc_flags));
	    }
	  else
	    {
	      /* Check that we need to copy from end of MVCC header up to the end of the buffer. */
	      assert (delid_offset == mvcc_header_size_lookup[mvcc_flags]);
	    }
#endif

	  memcpy (build_recdes_data, context->home_recdes.data + delid_offset,
		  context->home_recdes.length - delid_offset);
	  built_recdes.length = adjusted_size;
	}
      else
	{
	  int header_size;
	  /*
	   * Rare case - don't care to optimize it for now. Get the MVCC header, build adjusted record
	   * header - slow operation.
	   */
	  error_code = or_mvcc_get_header (&context->home_recdes, &record_header);
	  if (error_code != NO_ERROR)
	    {
	      ASSERT_ERROR ();
	      return error_code;
	    }
	  assert (record_header.mvcc_flag == mvcc_flags);

	  heap_delete_adjust_header (&record_header, mvcc_id, is_adjusted_size_big);
	  or_mvcc_add_header (&built_recdes, &record_header, OR_GET_BOUND_BIT_FLAG (context->home_recdes.data),
			      OR_GET_OFFSET_SIZE (context->home_recdes.data));
	  header_size = mvcc_header_size_lookup[mvcc_flags];
	  memcpy (built_recdes.data + built_recdes.length, context->home_recdes.data + header_size,
		  context->home_recdes.length - header_size);
	  built_recdes.length += (context->home_recdes.length - header_size);
	  assert (built_recdes.length == adjusted_size);
	}

      /* determine type */
      if (is_adjusted_size_big)
	{
	  built_recdes.type = REC_BIGONE;
	}
      else if (!spage_is_updatable (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
				    built_recdes.length))
	{
	  /* rebuilt record no longer fits in the home page */
	  built_recdes.type = REC_NEWHOME;
	}
      else
	{
	  built_recdes.type = REC_HOME;
	}

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);

      /* check whether relocation is necessary */
      if (built_recdes.type == REC_BIGONE || built_recdes.type == REC_NEWHOME)
	{
	  /*
	   * Relocation necessary
	   */
	  LOG_DATA_ADDR rec_address;

	  /* insertion of built record */
	  if (built_recdes.type == REC_BIGONE)
	    {
	      /* new record is overflow record - REC_BIGONE case */
	      forwarding_recdes.type = REC_BIGONE;
	      if (heap_ovf_insert (thread_p, &context->hfid, &forward_oid, &built_recdes) == NULL)
		{
		  ASSERT_ERROR_AND_SET (error_code);
		  return error_code;
		}

	      /* NOTE(review): a line (apparently a perfmon/statistics counter) is missing from this excerpt. */
	    }
	  else
	    {
	      /* new record is relocated - REC_NEWHOME case */
	      forwarding_recdes.type = REC_RELOCATION;

	      /* insert NEWHOME record */
	      error_code = heap_insert_newhome (thread_p, context, &built_recdes, &forward_oid, NULL);
	      if (error_code != NO_ERROR)
		{
		  ASSERT_ERROR ();
		  return error_code;
		}

	      /* NOTE(review): a line (apparently a perfmon/statistics counter) is missing from this excerpt. */
	    }

	  /* build forwarding rebuild_record */
	  heap_build_forwarding_recdes (&forwarding_recdes, forwarding_recdes.type, &forward_oid);

	  HEAP_PERF_TRACK_EXECUTE (thread_p, context);

	  if (context->home_page_watcher_p->page_was_unfixed)
	    {
	      /*
	       * Need to get the record again, since record may have changed
	       * by other transactions (INSID removed by VACUUM, page compact).
	       * The object was already locked, so the record size may be the
	       * same or smaller (INSID removed by VACUUM).
	       */
	      int is_peeking = (context->home_recdes.area_size >= context->home_recdes.length) ? COPY : PEEK;
	      if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
				    &context->home_recdes, is_peeking) != S_SUCCESS)
		{
		  assert (false);
		  return ER_FAILED;
		}
	    }

	  /* log relocation */
	  rec_address.pgptr = context->home_page_watcher_p->pgptr;
	  rec_address.vfid = &context->hfid.vfid;
	  rec_address.offset = context->oid.slotid;
	  heap_mvcc_log_home_change_on_delete (thread_p, &context->home_recdes, &forwarding_recdes, &rec_address);

	  HEAP_PERF_TRACK_LOGGING (thread_p, context);

	  /* we'll update the home page with the forwarding record */
	  home_page_updated_recdes = &forwarding_recdes;
	}
      else
	{
	  LOG_DATA_ADDR rec_address;

	  /*
	   * No relocation, can be updated in place
	   */

	  rec_address.pgptr = context->home_page_watcher_p->pgptr;
	  rec_address.vfid = &context->hfid.vfid;
	  rec_address.offset = context->oid.slotid;
	  heap_mvcc_log_delete (thread_p, &rec_address, RVHF_MVCC_DELETE_REC_HOME);

	  HEAP_PERF_TRACK_LOGGING (thread_p, context);

	  /* we'll update the home page with the built record, since it fits in home page */
	  home_page_updated_recdes = &built_recdes;

	  /* NOTE(review): a line (apparently a perfmon/statistics counter) is missing from this excerpt. */
	}

      /* update home page and check operation result */
      error_code =
	heap_update_physical (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
			      home_page_updated_recdes);
      if (error_code != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  return error_code;
	}

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);
    }
  else
    {
      /* non-MVCC delete: log and physically remove the home record */
      bool is_reusable = heap_is_reusable_oid (context->file_type);

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);

      /* log operation */
      heap_log_delete_physical (thread_p, context->home_page_watcher_p->pgptr, &context->hfid.vfid, &context->oid,
				&context->home_recdes, is_reusable, NULL);

      HEAP_PERF_TRACK_LOGGING (thread_p, context);

      /* physical deletion */
      error_code = heap_delete_physical (thread_p, &context->hfid, context->home_page_watcher_p->pgptr, &context->oid);

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);

      /* NOTE(review): a line (apparently a perfmon/statistics counter) is missing from this excerpt. */

      assert (error_code == NO_ERROR || er_errid () != NO_ERROR);
      return error_code;
    }

  /* all ok */
  return NO_ERROR;
}
21407 
21408 /*
21409  * heap_delete_physical () - physical deletion of a record
21410  * thread_p(in): thread entry
21411  * hfid_p(in): heap file identifier where record is located
21412  * page_p(in): page where record is stored
21413  * oid_p(in): object identifier of record
21414  */
21415 static int
21416 heap_delete_physical (THREAD_ENTRY * thread_p, HFID * hfid_p, PAGE_PTR page_p, OID * oid_p)
21417 {
21418  int free_space;
21419 
21420  /* check input */
21421  assert (hfid_p != NULL);
21422  assert (page_p != NULL);
21423  assert (oid_p != NULL);
21424  assert (oid_p->slotid != NULL_SLOTID);
21425 
21426  /* save old freespace */
21427  free_space = spage_get_free_space_without_saving (thread_p, page_p, NULL);
21428 
21429  /* physical deletion */
21430  if (spage_delete (thread_p, page_p, oid_p->slotid) == NULL_SLOTID)
21431  {
21432  return ER_FAILED;
21433  }
21434 
21435  /* update statistics */
21436  heap_stats_update (thread_p, page_p, hfid_p, free_space);
21437 
21438  /* mark page as dirty */
21439  pgbuf_set_dirty (thread_p, page_p, DONT_FREE);
21440 
21441  /* all ok */
21442  return NO_ERROR;
21443 }
21444 
21445 /*
21446  * heap_log_delete_physical () - log physical deletion
21447  * thread_p(in): thread entry
21448  * page_p(in): page pointer
21449  * vfid_p(in): virtual file identifier
21450  * oid_p(in): object identifier of deleted record
21451  * recdes_p(in): record descriptor of deleted record
21452  * mark_reusable(in): if true, will mark the slot as reusable
21453  * undo_lsa(out): lsa to the undo record; needed to set previous version lsa of record at update
21454  */
21455 static void
21456 heap_log_delete_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, VFID * vfid_p, OID * oid_p, RECDES * recdes_p,
21457  bool mark_reusable, LOG_LSA * undo_lsa)
21458 {
21459  LOG_DATA_ADDR log_addr;
21460 
21461  /* check input */
21462  assert (page_p != NULL);
21463  assert (vfid_p != NULL);
21464  assert (oid_p != NULL);
21465  assert (recdes_p != NULL);
21466 
21467  /* populate address */
21468  log_addr.offset = oid_p->slotid;
21469  log_addr.pgptr = page_p;
21470  log_addr.vfid = vfid_p;
21471 
21472  if (recdes_p->type == REC_ASSIGN_ADDRESS)
21473  {
21474  /* special case for REC_ASSIGN */
21475  RECDES temp_recdes;
21476  INT16 bytes_reserved;
21477 
21478  temp_recdes.type = recdes_p->type;
21479  temp_recdes.area_size = sizeof (bytes_reserved);
21480  temp_recdes.length = sizeof (bytes_reserved);
21481  bytes_reserved = (INT16) recdes_p->length;
21482  temp_recdes.data = (char *) &bytes_reserved;
21483 
21484  log_append_undoredo_recdes (thread_p, RVHF_DELETE, &log_addr, &temp_recdes, NULL);
21485  }
21486  else
21487  {
21488  /* log record descriptor */
21489  log_append_undoredo_recdes (thread_p, RVHF_DELETE, &log_addr, recdes_p, NULL);
21490  }
21491 
21492  if (undo_lsa)
21493  {
21494  /* get, set undo lsa before log_append_postpone() will make it inaccessible */
21495  LSA_COPY (undo_lsa, logtb_find_current_tran_lsa (thread_p));
21496  }
21497 
21498  /* log postponed operation */
21499  if (mark_reusable)
21500  {
21501  log_append_postpone (thread_p, RVHF_MARK_REUSABLE_SLOT, &log_addr, 0, NULL);
21502  }
21503 }
21504 
/*
 * heap_update_bigone () - update a REC_BIGONE record
 *   thread_p(in): thread entry
 *   context(in): operation context
 *   is_mvcc_op(in): type of operation (MVCC/non-MVCC)
 *
 * Note: The home record of a REC_BIGONE object stores only the OID of the
 *       first overflow page; the object's data lives in the overflow file.
 *       Depending on the new record's length the update either stays in
 *       overflow, folds back into the home page as REC_HOME, or relocates
 *       to a REC_RELOCATION + REC_NEWHOME pair.
 */
static int
heap_update_bigone (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op)
{
  int error_code = NO_ERROR;
  bool is_old_home_updated;
  RECDES new_home_recdes;

  assert (context != NULL);
  assert (context->type == HEAP_OPERATION_UPDATE);
  assert (context->recdes_p != NULL);
  assert (context->home_page_watcher_p != NULL);
  assert (context->home_page_watcher_p->pgptr != NULL);
  assert (context->overflow_page_watcher_p != NULL);

  /* read OID of overflow record; the home record's data is exactly that OID */
  context->ovf_oid = *((OID *) context->home_recdes.data);

  /* fix header page */
  error_code = heap_fix_header_page (thread_p, context);
  if (error_code != NO_ERROR)
    {
      ASSERT_ERROR ();
      goto exit;
    }

  HEAP_PERF_TRACK_PREPARE (thread_p, context);

  if (is_mvcc_op)
    {
      /* log old overflow record and set prev version lsa */

      /* This undo log record have two roles: 1) to keep the old record version; 2) to reach the record at undo
       * in order to check if it should have its insert id and prev version vacuumed; */
      RECDES ovf_recdes = RECDES_INITIALIZER;
      VPID ovf_vpid;
      PAGE_PTR first_pgptr;

      if (heap_get_bigone_content (thread_p, context->scan_cache_p, COPY, &context->ovf_oid, &ovf_recdes) != S_SUCCESS)
	{
	  error_code = ER_FAILED;
	  goto exit;
	}

      VPID_GET_FROM_OID (&ovf_vpid, &context->ovf_oid);
      first_pgptr = pgbuf_fix (thread_p, &ovf_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, PGBUF_UNCONDITIONAL_LATCH);
      if (first_pgptr == NULL)
	{
	  error_code = ER_FAILED;
	  goto exit;
	}

      /* actual logging */
      log_append_undo_recdes2 (thread_p, RVHF_MVCC_UPDATE_OVERFLOW, NULL, first_pgptr, -1, &ovf_recdes);
      HEAP_PERF_TRACK_LOGGING (thread_p, context);

      pgbuf_set_dirty (thread_p, first_pgptr, FREE);

      /* set prev version lsa */
      /* NOTE(review): a statement appears to be missing here in this extract
       * (presumably copying the current transaction LSA into the new record's
       * previous-version LSA) -- verify against the repository. */
    }

  /* Proceed with the update. the new record is prepared and for mvcc it should have the prev version lsa set */
  if (heap_is_big_length (context->recdes_p->length))
    {
      /* overflow -> overflow update */
      is_old_home_updated = false;

      if (heap_ovf_update (thread_p, &context->hfid, &context->ovf_oid, context->recdes_p) == NULL)
	{
	  ASSERT_ERROR_AND_SET (error_code);
	  goto exit;
	}
      HEAP_PERF_TRACK_EXECUTE (thread_p, context);

      if (is_mvcc_op)
	{
	  /* log home no change; vacuum needs it to reach the updated overflow record */
	  LOG_DATA_ADDR log_addr;

	  LOG_SET_DATA_ADDR (&log_addr, context->home_page_watcher_p->pgptr, &context->hfid.vfid, context->oid.slotid);

	  heap_mvcc_log_home_no_change (thread_p, &log_addr);

	  /* dirty home page because of logging */
	  pgbuf_set_dirty (thread_p, context->home_page_watcher_p->pgptr, DONT_FREE);
	  HEAP_PERF_TRACK_LOGGING (thread_p, context);
	}
    }
  else if (spage_update (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid, context->recdes_p) ==
	   SP_SUCCESS)
    {
      /* overflow -> rec home update (new record fits in home page) */
      is_old_home_updated = true;

      /* update it's type in the page */
      context->record_type = context->recdes_p->type = REC_HOME;
      spage_update_record_type (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
				context->recdes_p->type);

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);

      new_home_recdes = *context->recdes_p;

      /* dirty home page */
      pgbuf_set_dirty (thread_p, context->home_page_watcher_p->pgptr, DONT_FREE);
    }
  else
    {
      /* overflow -> rec relocation update (home record will point to the new_home record) */
      OID newhome_oid;

      /* insert new home */
      HEAP_PERF_TRACK_EXECUTE (thread_p, context);
      context->recdes_p->type = REC_NEWHOME;
      error_code = heap_insert_newhome (thread_p, context, context->recdes_p, &newhome_oid, NULL);
      if (error_code != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  goto exit;
	}

      /* prepare record descriptor */
      heap_build_forwarding_recdes (&new_home_recdes, REC_RELOCATION, &newhome_oid);

      /* update home */
      error_code = heap_update_physical (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
					 &new_home_recdes);
      if (error_code != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  goto exit;
	}
      is_old_home_updated = true;

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);
    }

  if (is_old_home_updated)
    {
      /* log home update operation and remove old overflow record */
      heap_log_update_physical (thread_p, context->home_page_watcher_p->pgptr, &context->hfid.vfid,
				&context->oid, &context->home_recdes, &new_home_recdes,
				(is_mvcc_op ? RVHF_UPDATE_NOTIFY_VACUUM : RVHF_UPDATE));
      HEAP_PERF_TRACK_LOGGING (thread_p, context);

      /* the old overflow record is no longer needed, it was linked only by old home */
      if (heap_ovf_delete (thread_p, &context->hfid, &context->ovf_oid, NULL) == NULL)
	{
	  ASSERT_ERROR_AND_SET (error_code);
	  goto exit;
	}
      HEAP_PERF_TRACK_EXECUTE (thread_p, context);
    }

  /* location did not change */
  COPY_OID (&context->res_oid, &context->oid);

  /* NOTE(review): a line appears to be missing here in this extract -- verify
   * against the repository. */

  /* Fall through to exit. */

exit:
  return error_code;
}
21675 
/*
 * heap_update_relocation () - update a REC_RELOCATION/REC_NEWHOME combo
 *   thread_p(in): thread entry
 *   context(in): operation context
 *   is_mvcc_op(in): type of operation (MVCC/non-MVCC)
 *
 * Note: The home record is a REC_RELOCATION that points to a REC_NEWHOME on
 *       a forward page.  Depending on where the new data fits, the update
 *       keeps the forward record, moves the data back into the home page,
 *       relocates it to a fresh forward record, or pushes it to overflow.
 */
static int
heap_update_relocation (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op)
{
  RECDES forward_recdes;
  char forward_recdes_buffer[IO_MAX_PAGE_SIZE + MAX_ALIGNMENT];
  OID forward_oid;
  int rc;
  RECDES new_home_recdes;
  OID new_forward_oid;
  bool fits_in_home, fits_in_forward;
  bool update_old_home = false;
  bool update_old_forward = false;
  bool remove_old_forward = false;
  LOG_LSA prev_version_lsa = LSA_INITIALIZER;
  PGBUF_WATCHER newhome_pg_watcher;	/* fwd pg watcher required for heap_update_set_prev_version() */
  PGBUF_WATCHER *newhome_pg_watcher_p = NULL;

  assert (context != NULL);
  assert (context->recdes_p != NULL);
  assert (context->type == HEAP_OPERATION_UPDATE);
  assert (context->home_page_watcher_p != NULL);
  assert (context->home_page_watcher_p->pgptr != NULL);
  assert (context->forward_page_watcher_p != NULL);

  /* get forward oid; the home record's data is exactly that OID */
  forward_oid = *((OID *) context->home_recdes.data);

  /* fix forward page */
  rc = heap_fix_forward_page (thread_p, context, &forward_oid);
  if (rc != NO_ERROR)
    {
      ASSERT_ERROR ();
      goto exit;
    }

  /* fix header if necessary */
  fits_in_home =
    spage_is_updatable (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid, context->recdes_p->length);
  fits_in_forward =
    spage_is_updatable (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid,
			context->recdes_p->length);
  if (heap_is_big_length (context->recdes_p->length) || (!fits_in_forward && !fits_in_home))
    {
      /* fix header page */
      rc = heap_fix_header_page (thread_p, context);
      if (rc != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  goto exit;
	}
    }

  /* get forward record */
  forward_recdes.area_size = DB_PAGESIZE;
  forward_recdes.data = PTR_ALIGN (forward_recdes_buffer, MAX_ALIGNMENT);
  if (spage_get_record (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid, &forward_recdes, COPY) !=
      S_SUCCESS)
    {
      assert (false);
      ASSERT_ERROR_AND_SET (rc);
      goto exit;
    }

  HEAP_PERF_TRACK_PREPARE (thread_p, context);

  /* determine what operations on home/forward pages are necessary and execute extra operations for each case */
  if (heap_is_big_length (context->recdes_p->length))
    {
      /* insert new overflow record */
      if (heap_ovf_insert (thread_p, &context->hfid, &new_forward_oid, context->recdes_p) == NULL)
	{
	  ASSERT_ERROR_AND_SET (rc);
	  goto exit;
	}

      /* home record descriptor will be an overflow OID and will be placed in original home page */
      heap_build_forwarding_recdes (&new_home_recdes, REC_BIGONE, &new_forward_oid);

      /* remove old forward record */
      remove_old_forward = true;
      update_old_home = true;

      /* NOTE(review): a line appears to be missing here in this extract --
       * verify against the repository. */
    }
  else if (!fits_in_forward && !fits_in_home)
    {
      /* insert a new forward record */

      if (is_mvcc_op)
	{
	  /* necessary later to set prev version, which is required only for mvcc objects */
	  newhome_pg_watcher_p = &newhome_pg_watcher;
	  /* NOTE(review): the watcher initialization appears to be missing
	   * here in this extract (presumably PGBUF_INIT_WATCHER) -- verify. */
	}

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);
      context->recdes_p->type = REC_NEWHOME;
      rc = heap_insert_newhome (thread_p, context, context->recdes_p, &new_forward_oid, newhome_pg_watcher_p);
      if (rc != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  goto exit;
	}

      /* new home record will be a REC_RELOCATION and will be placed in the original home page */
      heap_build_forwarding_recdes (&new_home_recdes, REC_RELOCATION, &new_forward_oid);

      /* remove old forward record */
      remove_old_forward = true;
      update_old_home = true;

    }
  else if (fits_in_home)
    {
      /* updated forward record fits in home page */
      context->recdes_p->type = REC_HOME;
      new_home_recdes = *context->recdes_p;

      /* remove old forward record */
      remove_old_forward = true;
      update_old_home = true;

    }
  else if (fits_in_forward)
    {
      /* updated forward record fits in old forward page */
      context->recdes_p->type = REC_NEWHOME;

      /* home record will not be touched */
      update_old_forward = true;

    }
  else
    {
      /* impossible case */
      assert (false);
      rc = ER_FAILED;
      goto exit;
    }

  /* The old rec_newhome must be removed or updated */
  assert (remove_old_forward != update_old_forward);
  /* Remove rec_newhome only in case of old_home update */
  assert (remove_old_forward == update_old_home);

  /*
   * Update old home record (if necessary)
   */
  if (update_old_home)
    {
      /* log operation */
      heap_log_update_physical (thread_p, context->home_page_watcher_p->pgptr, &context->hfid.vfid, &context->oid,
				&context->home_recdes, &new_home_recdes,
				(is_mvcc_op ? RVHF_UPDATE_NOTIFY_VACUUM : RVHF_UPDATE));
      HEAP_PERF_TRACK_LOGGING (thread_p, context);

      /* update home record */
      rc = heap_update_physical (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid, &new_home_recdes);
      if (rc != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  goto exit;
	}
      HEAP_PERF_TRACK_EXECUTE (thread_p, context);
    }

  /*
   * Delete old forward record (if necessary)
   */
  if (remove_old_forward)
    {
      assert (context->forward_page_watcher_p != NULL && context->forward_page_watcher_p->pgptr != NULL);
      /* NOTE(review): the condition below appears truncated in this extract;
       * its continuation line (presumably a page_was_unfixed check) is
       * missing -- verify against the repository. */
      if ((new_home_recdes.type == REC_RELOCATION || new_home_recdes.type == REC_BIGONE)
	{
	  /*
	   * Need to get the record again, since the record may have changed by other concurrent
	   * transactions (INSID removed by VACUUM).
	   */
	  if (spage_get_record (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid, &forward_recdes,
				COPY) != S_SUCCESS)
	    {
	      assert (false);
	      ASSERT_ERROR_AND_SET (rc);
	      goto exit;
	    }
	  HEAP_PERF_TRACK_PREPARE (thread_p, context);
	}


      /* log operation */
      heap_log_delete_physical (thread_p, context->forward_page_watcher_p->pgptr, &context->hfid.vfid, &forward_oid,
				&forward_recdes, true, &prev_version_lsa);
      HEAP_PERF_TRACK_LOGGING (thread_p, context);

      /* physical removal of forward record */
      rc = heap_delete_physical (thread_p, &context->hfid, context->forward_page_watcher_p->pgptr, &forward_oid);
      if (rc != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  goto exit;
	}
      HEAP_PERF_TRACK_EXECUTE (thread_p, context);
    }

  /*
   * Update old forward record (if necessary)
   */
  if (update_old_forward)
    {
      /* log operation */
      heap_log_update_physical (thread_p, context->forward_page_watcher_p->pgptr, &context->hfid.vfid, &forward_oid,
				&forward_recdes, context->recdes_p, RVHF_UPDATE);
      LSA_COPY (&prev_version_lsa, logtb_find_current_tran_lsa (thread_p));

      if (is_mvcc_op)
	{
	  LOG_DATA_ADDR p_addr;

	  p_addr.pgptr = context->home_page_watcher_p->pgptr;
	  p_addr.vfid = &context->hfid.vfid;
	  p_addr.offset = context->oid.slotid;

	  /* home remains untouched, log no_change on home to notify vacuum */
	  heap_mvcc_log_home_no_change (thread_p, &p_addr);

	  /* Even though home record is not modified, vacuum status of the page might be changed. */
	  pgbuf_set_dirty (thread_p, context->home_page_watcher_p->pgptr, DONT_FREE);
	}

      HEAP_PERF_TRACK_LOGGING (thread_p, context);

      /* physical update of forward record */
      rc = heap_update_physical (thread_p, context->forward_page_watcher_p->pgptr, forward_oid.slotid,
				 context->recdes_p);
      if (rc != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  goto exit;
	}

      HEAP_PERF_TRACK_EXECUTE (thread_p, context);
    }

  if (is_mvcc_op)
    {
      /* the updated record needs the prev version lsa to the undo log record where the old record can be found */
      rc = heap_update_set_prev_version (thread_p, &context->oid, context->home_page_watcher_p,
					 newhome_pg_watcher_p ? newhome_pg_watcher_p : context->forward_page_watcher_p,
					 &prev_version_lsa);

      if (rc != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  goto exit;
	}
    }

  /* location did not change */
  COPY_OID (&context->res_oid, &context->oid);

exit:

  if (newhome_pg_watcher_p != NULL && newhome_pg_watcher_p->pgptr != NULL)
    {
      /* newhome_pg_watcher is used only locally; must be unfixed */
      pgbuf_ordered_unfix (thread_p, newhome_pg_watcher_p);
    }

  return rc;
}
21956 
/*
 * heap_update_home () - update a REC_HOME record
 *   thread_p(in): thread entry
 *   context(in): operation context
 *   is_mvcc_op(in): type of operation (MVCC/non-MVCC)
 *
 * Note: Depending on the new record's length the object either stays in its
 *       home page, relocates to a REC_RELOCATION + REC_NEWHOME pair, or is
 *       pushed to overflow as a REC_BIGONE.
 */
static int
heap_update_home (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context, bool is_mvcc_op)
{
  int error_code = NO_ERROR;
  RECDES forwarding_recdes;
  RECDES *home_page_updated_recdes_p = NULL;
  OID forward_oid;
  LOG_RCVINDEX undo_rcvindex;
  LOG_LSA prev_version_lsa;
  PGBUF_WATCHER newhome_pg_watcher;	/* fwd pg watcher required for heap_update_set_prev_version() */
  PGBUF_WATCHER *newhome_pg_watcher_p = NULL;

  assert (context != NULL);
  assert (context->recdes_p != NULL);
  assert (context->type == HEAP_OPERATION_UPDATE);
  assert (context->home_page_watcher_p != NULL);
  assert (context->home_page_watcher_p->pgptr != NULL);
  assert (context->forward_page_watcher_p != NULL);

  /* NOTE(review): the guarding if-condition appears to be missing from this
   * extract (presumably it checks is_mvcc_op together with a
   * REC_ASSIGN_ADDRESS home record type) -- verify against the repository. */
    {
      /* updating a REC_ASSIGN_ADDRESS should be done as a non-mvcc operation */
      assert (false);
#if defined(CUBRID_DEBUG)
      /* NOTE(review): the er_log_debug call's opening line is missing from
       * this extract. */
		    "heap_update_home: ** SYSTEM_ERROR ** update"
		    " mvcc update was attempted on REC_ASSIGN_ADDRESS home record");
#endif
      error_code = ER_FAILED;
      goto exit;
    }

#if defined (SERVER_MODE)
  if (is_mvcc_op)
    {
      undo_rcvindex = RVHF_UPDATE_NOTIFY_VACUUM;
    }
  else if (context->home_recdes.type == REC_ASSIGN_ADDRESS && !mvcc_is_mvcc_disabled_class (&context->class_oid))
    {
      /* Quick fix: Assign address is update in-place. Vacuum must be notified. */
      undo_rcvindex = RVHF_UPDATE_NOTIFY_VACUUM;
    }
  else
#endif /* SERVER_MODE */
    {
      undo_rcvindex = RVHF_UPDATE;
    }

  if (heap_is_big_length (context->recdes_p->length))
    {
      /* fix header page */
      error_code = heap_fix_header_page (thread_p, context);
      if (error_code != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  goto exit;
	}

      /* insert new overflow record */
      HEAP_PERF_TRACK_PREPARE (thread_p, context);
      if (heap_ovf_insert (thread_p, &context->hfid, &forward_oid, context->recdes_p) == NULL)
	{
	  ASSERT_ERROR_AND_SET (error_code);
	  goto exit;
	}

      /* forwarding record is REC_BIGONE */
      heap_build_forwarding_recdes (&forwarding_recdes, REC_BIGONE, &forward_oid);

      /* we'll be updating home with forwarding record */
      home_page_updated_recdes_p = &forwarding_recdes;

    }
  else if (!spage_is_updatable (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
				context->recdes_p->length))
    {
      /* insert new home */

      if (is_mvcc_op)
	{
	  /* necessary later to set prev version, which is required only for mvcc objects */
	  newhome_pg_watcher_p = &newhome_pg_watcher;
	  /* NOTE(review): the watcher initialization appears to be missing
	   * here in this extract (presumably PGBUF_INIT_WATCHER) -- verify. */
	}

      /* fix header page */
      error_code = heap_fix_header_page (thread_p, context);
      if (error_code != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  goto exit;
	}

      /* insert new home record */
      HEAP_PERF_TRACK_PREPARE (thread_p, context);
      context->recdes_p->type = REC_NEWHOME;
      error_code = heap_insert_newhome (thread_p, context, context->recdes_p, &forward_oid, newhome_pg_watcher_p);
      if (error_code != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  goto exit;
	}

      /* forwarding record is REC_RELOCATION */
      heap_build_forwarding_recdes (&forwarding_recdes, REC_RELOCATION, &forward_oid);

      /* we'll be updating home with forwarding record */
      home_page_updated_recdes_p = &forwarding_recdes;

    }
  else
    {
      context->recdes_p->type = REC_HOME;

      /* updated record fits in home page */
      home_page_updated_recdes_p = context->recdes_p;

    }

  HEAP_PERF_TRACK_EXECUTE (thread_p, context);

  /* NOTE(review): the condition below appears truncated in this extract; its
   * continuation line (presumably a page_was_unfixed check) is missing --
   * verify against the repository. */
  if ((home_page_updated_recdes_p->type == REC_RELOCATION || home_page_updated_recdes_p->type == REC_BIGONE)
    {
      /*
       * Need to get the record again, since record may have changed
       * by other transactions (INSID removed by VACUUM, page compact).
       * The object was already locked, so the record size may be the
       * same or smaller (INSID removed by VACUUM).
       */
      int is_peeking = (context->home_recdes.area_size >= context->home_recdes.length) ? COPY : PEEK;
      if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid, &context->home_recdes,
			    is_peeking) != S_SUCCESS)
	{
	  ASSERT_ERROR_AND_SET (error_code);
	  goto exit;
	}
      HEAP_PERF_TRACK_PREPARE (thread_p, context);
    }

  /* log home update */
  heap_log_update_physical (thread_p, context->home_page_watcher_p->pgptr, &context->hfid.vfid, &context->oid,
			    &context->home_recdes, home_page_updated_recdes_p, undo_rcvindex);
  LSA_COPY (&prev_version_lsa, logtb_find_current_tran_lsa (thread_p));

  HEAP_PERF_TRACK_LOGGING (thread_p, context);

  /* physical update of home record */
  error_code =
    heap_update_physical (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid,
			  home_page_updated_recdes_p);
  if (error_code != NO_ERROR)
    {
      assert (false);
      ASSERT_ERROR ();
      goto exit;
    }

  if (is_mvcc_op)
    {
      /* the updated record needs the prev version lsa to the undo log record where the old record can be found */
      error_code = heap_update_set_prev_version (thread_p, &context->oid, context->home_page_watcher_p,
						 newhome_pg_watcher_p, &prev_version_lsa);
      if (error_code != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  goto exit;
	}
    }

  HEAP_PERF_TRACK_EXECUTE (thread_p, context);

  /* location did not change */
  COPY_OID (&context->res_oid, &context->oid);

  /* Fall through to exit. */

exit:

  if (newhome_pg_watcher_p != NULL && newhome_pg_watcher_p->pgptr != NULL)
    {
      /* newhome_pg_watcher is used only locally; must be unfixed */
      pgbuf_ordered_unfix (thread_p, newhome_pg_watcher_p);
    }

  return error_code;
}
22153 
/*
 * heap_update_physical () - physically update a record
 *   thread_p(in): thread entry
 *   page_p(in): page where record is stored
 *   slot_id(in): slot where record is stored within page
 *   recdes_p(in): record descriptor of updated record
 *   returns: error code or NO_ERROR
 *
 * Note: Callers are expected to have already verified available space, so a
 *       spage_update failure here is treated as a system error.
 */
static int
heap_update_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, short slot_id, RECDES * recdes_p)
{
  int scancode;
  INT16 old_record_type;

  /* check input */
  assert (page_p != NULL);
  assert (recdes_p != NULL);
  assert (slot_id != NULL_SLOTID);

  /* retrieve current record type */
  old_record_type = spage_get_record_type (page_p, slot_id);

  /* update home page and check operation result */
  scancode = spage_update (thread_p, page_p, slot_id, recdes_p);
  if (scancode != SP_SUCCESS)
    {
      /*
       * This is likely a system error since we have already checked
       * for space.
       */
      assert (false);
      if (scancode != SP_ERROR)
	{
	  /* NOTE(review): a statement appears to be missing here in this
	   * extract (presumably an er_set of a generic error) -- verify
	   * against the repository. */
	}

#if defined(CUBRID_DEBUG)
      /* NOTE(review): the er_log_debug call's opening line is missing from
       * this extract. */
		    "heap_update_physical: ** SYSTEM_ERROR ** update operation failed even when have already checked"
		    " for space");
#endif

      return ER_FAILED;
    }

  /* Reflect record type change */
  if (old_record_type != recdes_p->type)
    {
      spage_update_record_type (thread_p, page_p, slot_id, recdes_p->type);
    }

  /* mark as dirty */
  pgbuf_set_dirty (thread_p, page_p, DONT_FREE);

  /* all ok */
  return NO_ERROR;
}
22211 
/*
 * heap_log_update_physical () - log a physical update
 *   thread_p(in): thread entry
 *   page_p(in): updated page
 *   vfid_p(in): virtual file id
 *   oid_p(in): object id
 *   old_recdes_p(in): old record
 *   new_recdes_p(in): new record
 *   rcvindex(in): Index to recovery function
 */
static void
heap_log_update_physical (THREAD_ENTRY * thread_p, PAGE_PTR page_p, VFID * vfid_p, OID * oid_p, RECDES * old_recdes_p,
			  RECDES * new_recdes_p, LOG_RCVINDEX rcvindex)
{
  LOG_DATA_ADDR address;

  /* build address */
  address.offset = oid_p->slotid;
  address.pgptr = page_p;
  address.vfid = vfid_p;

  /* actual logging */
  if (LOG_IS_MVCC_HEAP_OPERATION (rcvindex))
    {
      /* MVCC heap operations also maintain the page's vacuum chain. */
      HEAP_PAGE_VACUUM_STATUS vacuum_status = heap_page_get_vacuum_status (thread_p, page_p);
      heap_page_update_chain_after_mvcc_op (thread_p, page_p, logtb_get_current_mvccid (thread_p));
      if (heap_page_get_vacuum_status (thread_p, page_p) != vacuum_status)
	{
	  /* Mark vacuum status change for recovery. */
	  /* NOTE(review): a statement appears to be missing here in this
	   * extract (presumably flagging the log address offset) -- verify
	   * against the repository. */
	}
    }

  log_append_undoredo_recdes (thread_p, rcvindex, &address, old_recdes_p, new_recdes_p);
}
22247 
22248 /*
22249  * heap_create_insert_context () - create an insertion context
22250  * context(in): context to set up
22251  * hfid_p(in): heap file identifier
22252  * class_oid_p(in): class OID
22253  * recdes_p(in): record descriptor to insert
22254  * scancache_p(in): scan cache to use (optional)
22255  */
22256 void
22257 heap_create_insert_context (HEAP_OPERATION_CONTEXT * context, HFID * hfid_p, OID * class_oid_p, RECDES * recdes_p,
22258  HEAP_SCANCACHE * scancache_p)
22259 {
22260  assert (context != NULL);
22261  assert (hfid_p != NULL);
22262  assert (recdes_p != NULL);
22263 
22264  heap_clear_operation_context (context, hfid_p);
22265  if (class_oid_p != NULL)
22266  {
22267  COPY_OID (&context->class_oid, class_oid_p);
22268  }
22269  context->recdes_p = recdes_p;
22270  context->scan_cache_p = scancache_p;
22271  context->type = HEAP_OPERATION_INSERT;
22272 }
22273 
22274 /*
22275  * heap_create_delete_context () - create a deletion context
22276  * context(in): context to set up
22277  * hfid_p(in): heap file identifier
22278  * oid(in): identifier of object to delete
22279  * class_oid_p(in): class OID
22280  * scancache_p(in): scan cache to use (optional)
22281  */
22282 void
22283 heap_create_delete_context (HEAP_OPERATION_CONTEXT * context, HFID * hfid_p, OID * oid_p, OID * class_oid_p,
22284  HEAP_SCANCACHE * scancache_p)
22285 {
22286  assert (context != NULL);
22287  assert (hfid_p != NULL);
22288  assert (oid_p != NULL);
22289  assert (class_oid_p != NULL);
22290 
22291  heap_clear_operation_context (context, hfid_p);
22292  COPY_OID (&context->oid, oid_p);
22293  COPY_OID (&context->class_oid, class_oid_p);
22294  context->scan_cache_p = scancache_p;
22295  context->type = HEAP_OPERATION_DELETE;
22296 }
22297 
22298 /*
22299  * heap_create_update_context () - create an update operation context
22300  * context(in): context to set up
22301  * hfid_p(in): heap file identifier
22302  * oid(in): identifier of object to delete
22303  * class_oid_p(in): class OID
22304  * recdes_p(in): updated record to write
22305  * scancache_p(in): scan cache to use (optional)
22306  * in_place(in): specifies if the "in place" type of the update operation
22307  */
22308 void
22309 heap_create_update_context (HEAP_OPERATION_CONTEXT * context, HFID * hfid_p, OID * oid_p, OID * class_oid_p,
22310  RECDES * recdes_p, HEAP_SCANCACHE * scancache_p, UPDATE_INPLACE_STYLE in_place)
22311 {
22312  assert (context != NULL);
22313  assert (hfid_p != NULL);
22314  assert (oid_p != NULL);
22315  assert (class_oid_p != NULL);
22316  assert (recdes_p != NULL);
22317 
22318  heap_clear_operation_context (context, hfid_p);
22319  COPY_OID (&context->oid, oid_p);
22320  COPY_OID (&context->class_oid, class_oid_p);
22321  context->recdes_p = recdes_p;
22322  context->scan_cache_p = scancache_p;
22323  context->type = HEAP_OPERATION_UPDATE;
22324  context->update_in_place = in_place;
22325 }
22326 
/*
 * heap_insert_logical () - Insert an object onto heap
 *   context(in/out): operation context
 *   return: error code or NO_ERROR
 *
 * Note: Insert an object onto the given file heap. The object is
 *       inserted using the following algorithm:
 *       1: If the object cannot be inserted in a single page, it is
 *          inserted in overflow as a multipage object. An overflow
 *          relocation record is created in the heap as an address map
 *          to the actual content of the object (the overflow address).
 *       2: If the object can be inserted in the last allocated page
 *          without overpassing the reserved space on the page, the
 *          object is placed on this page.
 *       3: If the object can be inserted in the hinted page without
 *          overpassing the reserved space on the page, the object is
 *          placed on this page.
 *       4: The object is inserted in a newly allocated page. Don't
 *          about reserve space here.
 *
 * NOTE-1: The class object was already IX-locked during compile time
 *         under normal situation.
 *         However, with prepare-execute-commit-execute-... scenario,
 *         the class object is not properly IX-locked since the previous
 *         commit released the entire acquired locks including IX-lock.
 *         So we have to make it sure the class object is IX-locked at this
 *         moment.
 */
int
/* NOTE(review): the function signature line is missing from this extract;
 * expected heap_insert_logical (THREAD_ENTRY * thread_p,
 * HEAP_OPERATION_CONTEXT * context) -- verify against the repository. */
{
  bool is_mvcc_op;
  int rc = NO_ERROR;
  PERF_UTIME_TRACKER time_track;
  bool is_mvcc_class;

  /* check required input */
  assert (context != NULL);
  assert (context->type == HEAP_OPERATION_INSERT);
  assert (context->recdes_p != NULL);
  assert (!HFID_IS_NULL (&context->hfid));

  context->time_track = &time_track;
  HEAP_PERF_START (thread_p, context);

  /* check scancache */
  if (heap_scancache_check_with_hfid (thread_p, &context->hfid, &context->class_oid, &context->scan_cache_p) !=
      NO_ERROR)
    {
      return ER_FAILED;
    }

  is_mvcc_class = !mvcc_is_mvcc_disabled_class (&context->class_oid);
  /*
   * Determine type of operation
   */
#if defined (SERVER_MODE)
  if (is_mvcc_class && context->recdes_p->type != REC_ASSIGN_ADDRESS)
    {
      is_mvcc_op = true;
    }
  else
    {
      is_mvcc_op = false;
    }
#else /* SERVER_MODE */
  is_mvcc_op = false;
#endif /* SERVER_MODE */

  /*
   * Record header adjustments
   */
  if (!OID_ISNULL (&context->class_oid) && !OID_IS_ROOTOID (&context->class_oid)
      && context->recdes_p->type != REC_ASSIGN_ADDRESS)
    {
      if (heap_insert_adjust_recdes_header (thread_p, context, is_mvcc_class) != NO_ERROR)
	{
	  return ER_FAILED;
	}
    }

#if defined(ENABLE_SYSTEMTAP)
  CUBRID_OBJ_INSERT_START (&context->class_oid);
#endif /* ENABLE_SYSTEMTAP */

  /*
   * Handle multipage object
   */
  if (heap_insert_handle_multipage_record (thread_p, context) != NO_ERROR)
    {
      rc = ER_FAILED;
      goto error;
    }

  /*
   * Locking
   */
  /* make sure we have IX_LOCK on class see [NOTE-1] */
  if (lock_object (thread_p, &context->class_oid, oid_Root_class_oid, IX_LOCK, LK_UNCOND_LOCK) != LK_GRANTED)
    {
      return ER_FAILED;
    }

  /* get insert location (includes locking) */
  if (heap_get_insert_location_with_lock (thread_p, context, NULL) != NO_ERROR)
    {
      return ER_FAILED;
    }

  HEAP_PERF_TRACK_PREPARE (thread_p, context);

  /*
   * Physical insertion
   */
  if (heap_insert_physical (thread_p, context) != NO_ERROR)
    {
      rc = ER_FAILED;
      goto error;
    }

  HEAP_PERF_TRACK_EXECUTE (thread_p, context);

  /*
   * Operation logging
   */
  heap_log_insert_physical (thread_p, context->home_page_watcher_p->pgptr, &context->hfid.vfid, &context->res_oid,
			    context->recdes_p, is_mvcc_op, context->is_redistribute_insert_with_delid);

  HEAP_PERF_TRACK_LOGGING (thread_p, context);

  /* mark insert page as dirty */
  pgbuf_set_dirty (thread_p, context->home_page_watcher_p->pgptr, DONT_FREE);

  /*
   * Page unfix or caching
   */
  if (context->scan_cache_p != NULL && context->home_page_watcher_p == &context->home_page_watcher
      && context->scan_cache_p->cache_last_fix_page == true)
    {
      /* cache */
      assert (context->home_page_watcher_p->pgptr != NULL);
      pgbuf_replace_watcher (thread_p, context->home_page_watcher_p, &context->scan_cache_p->page_watcher);
    }
  else
    {
      /* unfix */
      pgbuf_ordered_unfix (thread_p, context->home_page_watcher_p);
    }

  /* unfix other pages */
  heap_unfix_watchers (thread_p, context);

  /*
   * Class creation case
   */
  if (context->recdes_p->type != REC_ASSIGN_ADDRESS && HFID_EQ ((&context->hfid), &(heap_Classrepr->rootclass_hfid)))
    {
      if (heap_mark_class_as_modified (thread_p, &context->res_oid, or_chn (context->recdes_p), false) != NO_ERROR)
	{
	  rc = ER_FAILED;
	  goto error;
	}
    }

  if (context->recdes_p->type == REC_HOME)
    {
      /* NOTE(review): a statement appears to be missing here in this extract
       * (presumably a performance-statistic increment for home inserts) --
       * verify against the repository. */
    }
  else if (context->recdes_p->type == REC_BIGONE)
    {
      /* NOTE(review): a statement appears to be missing here in this extract
       * (presumably a performance-statistic increment for big inserts). */
    }
  else
    {
      /* NOTE(review): a statement appears to be missing here in this extract
       * (presumably a performance-statistic increment for other inserts). */
    }

error:

#if defined(ENABLE_SYSTEMTAP)
  CUBRID_OBJ_INSERT_END (&context->class_oid, (rc < 0));
#endif /* ENABLE_SYSTEMTAP */

  /* all ok */
  return rc;
}
22513 
/*
 * heap_delete_logical () - Delete an object from heap file
 *   thread_p(in): thread entry
 *   context(in): operation context
 *   return: error code or NO_ERROR
 *
 * Note: Delete the object associated with the given OID from the given
 *       heap file. If the object has been relocated or stored in
 *       overflow, both the relocation and the relocated record are deleted.
 */
int
/* NOTE(review): the signature line was lost in extraction; presumably
 * heap_delete_logical (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
 * -- confirm against the repository. */
{
  bool is_mvcc_op;		/* true => MVCC-style delete (mark deleted); false => physical removal */
  int rc = NO_ERROR;
  PERF_UTIME_TRACKER time_track;

  /*
   * Check input
   */
  assert (context != NULL);
  assert (context->type == HEAP_OPERATION_DELETE);
  assert (!HFID_IS_NULL (&context->hfid));
  assert (!OID_ISNULL (&context->oid));

  context->time_track = &time_track;
  HEAP_PERF_START (thread_p, context);

  /* check input OID validity */
  if (heap_is_valid_oid (thread_p, &context->oid) != NO_ERROR)
    {
      return ER_FAILED;
    }

  /* check scancache */
  if (heap_scancache_check_with_hfid (thread_p, &context->hfid, &context->class_oid, &context->scan_cache_p) !=
      NO_ERROR)
    {
      return ER_FAILED;
    }

  /* check file type */
  context->file_type = heap_get_file_type (thread_p, context);
  if (context->file_type != FILE_HEAP && context->file_type != FILE_HEAP_REUSE_SLOTS)
    {
      if (context->file_type == FILE_UNKNOWN_TYPE)
        {
          ASSERT_ERROR_AND_SET (rc);
          if (rc == ER_INTERRUPTED)
            {
              /* interruption is propagated as-is; anything else falls through
               * to the generic failure below */
              return rc;
            }
        }
      /* NOTE(review): a line (likely an assert or er_set) between the branch
       * above and this return was lost in extraction. */
      return ER_FAILED;
    }

  /*
   * Class deletion case
   */
  if (HFID_EQ (&context->hfid, &(heap_Classrepr->rootclass_hfid)))
    {
      /* deleting from the root class heap => a class object is being dropped */
      if (heap_mark_class_as_modified (thread_p, &context->oid, NULL_CHN, true) != NO_ERROR)
        {
          return ER_FAILED;
        }
    }

  /*
   * Determine type of operation
   */
#if defined (SERVER_MODE)
  if (mvcc_is_mvcc_disabled_class (&context->class_oid))
    {
      is_mvcc_op = false;
    }
  else
    {
      is_mvcc_op = true;
    }
#else /* SERVER_MODE */
  is_mvcc_op = false;
#endif /* SERVER_MODE */

#if defined(ENABLE_SYSTEMTAP)
  CUBRID_OBJ_DELETE_START (&context->class_oid);
#endif /* ENABLE_SYSTEMTAP */

  /*
   * Fetch object's page and check record type
   */
  if (heap_get_record_location (thread_p, context) != NO_ERROR)
    {
      rc = ER_FAILED;
      goto error;
    }

  context->record_type = spage_get_record_type (context->home_page_watcher_p->pgptr, context->oid.slotid);
  if (context->record_type == REC_UNKNOWN)
    {
      /* NOTE(review): the head of the er_set call (reporting the unknown
       * object) was lost in extraction; only its trailing argument remains. */
      context->oid.slotid);
      rc = ER_FAILED;
      goto error;
    }

  /* fetch record to be deleted */
  context->home_recdes.area_size = DB_PAGESIZE;
  /* NOTE(review): the line assigning context->home_recdes.data (a page-sized
   * buffer, presumably) was lost in extraction -- confirm. */
  if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid, &context->home_recdes, COPY)
      != S_SUCCESS)
    {
      rc = ER_FAILED;
      goto error;
    }

  HEAP_PERF_TRACK_PREPARE (thread_p, context);

  /*
   * Physical deletion and logging
   */
  switch (context->record_type)
    {
    case REC_BIGONE:
      rc = heap_delete_bigone (thread_p, context, is_mvcc_op);
      break;

    case REC_RELOCATION:
      rc = heap_delete_relocation (thread_p, context, is_mvcc_op);
      break;

    case REC_HOME:
    case REC_ASSIGN_ADDRESS:
      rc = heap_delete_home (thread_p, context, is_mvcc_op);
      break;

    default:
      /* NOTE(review): the head of the er_set call was lost in extraction. */
      context->oid.slotid);
      rc = ER_FAILED;
      goto error;
    }


error:

  /* unfix or keep home page */
  if (context->scan_cache_p != NULL && context->home_page_watcher_p == &context->home_page_watcher
      && context->scan_cache_p->cache_last_fix_page == true)
    {
      /* hand the fixed page over to the scan cache for reuse */
      pgbuf_replace_watcher (thread_p, context->home_page_watcher_p, &context->scan_cache_p->page_watcher);
    }
  else
    {
      if (context->home_page_watcher_p->pgptr != NULL)
        {
          pgbuf_ordered_unfix (thread_p, context->home_page_watcher_p);
        }
    }

  /* unfix pages */
  heap_unfix_watchers (thread_p, context);

#if defined(ENABLE_SYSTEMTAP)
  CUBRID_OBJ_DELETE_END (&context->class_oid, (rc != NO_ERROR));
#endif /* ENABLE_SYSTEMTAP */

  return rc;
}
22683 
/*
 * heap_update_logical () - update a record in a heap file
 *   thread_p(in): thread entry
 *   context(in): operation context
 *   return: error code or NO_ERROR
 */
extern int
/* NOTE(review): the signature line was lost in extraction; presumably
 * heap_update_logical (THREAD_ENTRY * thread_p, HEAP_OPERATION_CONTEXT * context)
 * -- confirm against the repository. */
{
  bool is_mvcc_op;		/* true => MVCC-style update (new version); false => in-place */
  int rc = NO_ERROR;
  PERF_UTIME_TRACKER time_track;
  bool is_mvcc_class;		/* class participates in MVCC */

  /*
   * Check input
   */
  assert (context != NULL);
  assert (context->type == HEAP_OPERATION_UPDATE);
  assert (!OID_ISNULL (&context->oid));
  assert (!OID_ISNULL (&context->class_oid));

  context->time_track = &time_track;
  HEAP_PERF_START (thread_p, context);

  /* check scancache */
  rc = heap_scancache_check_with_hfid (thread_p, &context->hfid, &context->class_oid, &context->scan_cache_p);
  if (rc != NO_ERROR)
    {
      ASSERT_ERROR ();
      return rc;
    }

  /* check file type */
  context->file_type = heap_get_file_type (thread_p, context);
  if (context->file_type != FILE_HEAP && context->file_type != FILE_HEAP_REUSE_SLOTS)
    {
      if (context->file_type == FILE_UNKNOWN_TYPE)
        {
          ASSERT_ERROR_AND_SET (rc);
          if (rc == ER_INTERRUPTED)
            {
              return rc;
            }
        }
      /* NOTE(review): a line (likely an assert or er_set) before this return
       * was lost in extraction. */
      return ER_GENERIC_ERROR;
    }

  /* get heap file identifier from scancache if none was provided */
  if (HFID_IS_NULL (&context->hfid))
    {
      if (context->scan_cache_p != NULL)
        {
          HFID_COPY (&context->hfid, &context->scan_cache_p->node.hfid);
        }
      else
        {
          er_log_debug (ARG_FILE_LINE, "heap_update: Bad interface a heap is needed");
          /* NOTE(review): an er_set call between the debug log and the assert
           * was lost in extraction. */
          assert (false);
          return ER_HEAP_UNKNOWN_HEAP;
        }
    }

  /* check provided object identifier */
  rc = heap_is_valid_oid (thread_p, &context->oid);
  if (rc != NO_ERROR)
    {
      ASSERT_ERROR ();
      return rc;
    }

  /* by default, consider it old */
  context->is_logical_old = true;

  is_mvcc_class = !mvcc_is_mvcc_disabled_class (&context->class_oid);
  /*
   * Determine type of operation
   */
  is_mvcc_op = HEAP_UPDATE_IS_MVCC_OP (is_mvcc_class, context->update_in_place);
#if defined (SERVER_MODE)
  assert ((!is_mvcc_op && HEAP_IS_UPDATE_INPLACE (context->update_in_place))
          || (is_mvcc_op && !HEAP_IS_UPDATE_INPLACE (context->update_in_place)));
  /* the update in place concept should be changed in terms of mvcc */
#endif /* SERVER_MODE */

#if defined(ENABLE_SYSTEMTAP)
  CUBRID_OBJ_UPDATE_START (&context->class_oid);
#endif /* ENABLE_SYSTEMTAP */

  /*
   * Get location
   */
  rc = heap_get_record_location (thread_p, context);
  if (rc != NO_ERROR)
    {
      ASSERT_ERROR ();
      goto exit;
    }

  /* decache guessed representation */
  HEAP_MAYNEED_DECACHE_GUESSED_LASTREPRS (&context->oid, &context->hfid);

  /*
   * Fetch record
   */
  context->record_type = spage_get_record_type (context->home_page_watcher_p->pgptr, context->oid.slotid);
  if (context->record_type == REC_UNKNOWN)
    {
      /* NOTE(review): the head of the er_set call and the rc assignment in
       * this branch were lost in extraction. */
      context->oid.slotid);
      goto exit;
    }

  context->home_recdes.area_size = DB_PAGESIZE;
  /* NOTE(review): the line assigning context->home_recdes.data was lost in
   * extraction -- confirm. */
  if (spage_get_record (thread_p, context->home_page_watcher_p->pgptr, context->oid.slotid, &context->home_recdes, COPY)
      != S_SUCCESS)
    {
      rc = ER_FAILED;
      goto exit;
    }

  /*
   * Adjust new record header
   */
  if (!OID_ISNULL (&context->class_oid) && !OID_IS_ROOTOID (&context->class_oid))
    {
      rc = heap_update_adjust_recdes_header (thread_p, context, is_mvcc_class);
      if (rc != NO_ERROR)
        {
          ASSERT_ERROR ();
          goto exit;
        }
    }

  HEAP_PERF_TRACK_PREPARE (thread_p, context);

  /*
   * Update record
   */
  switch (context->record_type)
    {
    case REC_RELOCATION:
      rc = heap_update_relocation (thread_p, context, is_mvcc_op);
      break;

    case REC_BIGONE:
      rc = heap_update_bigone (thread_p, context, is_mvcc_op);
      break;

    case REC_ASSIGN_ADDRESS:
      /* it's not an old record, it was inserted in this transaction */
      context->is_logical_old = false;
      /* fall trough */
    case REC_HOME:
      rc = heap_update_home (thread_p, context, is_mvcc_op);
      break;

    default:
      /* NOTE(review): the head of the er_set call and the rc assignment were
       * lost in extraction. */
      context->oid.slotid);
      goto exit;
    }

  /* check return code of operation */
  if (rc != NO_ERROR)
    {
      ASSERT_ERROR ();
      goto exit;
    }

  /*
   * Class update case
   */
  if (HFID_EQ ((&context->hfid), &(heap_Classrepr->rootclass_hfid)))
    {
      /* a class object was updated; invalidate/update its cached representation */
      rc = heap_mark_class_as_modified (thread_p, &context->oid, or_chn (context->recdes_p), false);
      if (rc != NO_ERROR)
        {
          ASSERT_ERROR ();
          goto exit;
        }
    }

exit:

  /* unfix or cache home page */
  if (context->home_page_watcher_p->pgptr != NULL && context->home_page_watcher_p == &context->home_page_watcher)
    {
      if (context->scan_cache_p != NULL && context->scan_cache_p->cache_last_fix_page)
        {
          /* hand the fixed page over to the scan cache for reuse */
          pgbuf_replace_watcher (thread_p, context->home_page_watcher_p, &context->scan_cache_p->page_watcher);
        }
      else
        {
          pgbuf_ordered_unfix (thread_p, context->home_page_watcher_p);
        }
    }

  /* unfix pages */
  heap_unfix_watchers (thread_p, context);

#if defined(ENABLE_SYSTEMTAP)
  CUBRID_OBJ_UPDATE_END (&context->class_oid, (rc != NO_ERROR));
#endif /* ENABLE_SYSTEMTAP */

  return rc;
}
22896 
/*
 * heap_get_hfid_from_class_record () - get HFID from class record for the
 *				  given OID.
 *   return: error_code
 *   class_oid(in): class oid
 *   hfid(out): the resulting hfid
 */
static int
heap_get_hfid_from_class_record (THREAD_ENTRY * thread_p, const OID * class_oid, HFID * hfid)
{
  int error_code = NO_ERROR;
  RECDES recdes;
  HEAP_SCANCACHE scan_cache;

  if (class_oid == NULL || hfid == NULL)
    {
      return ER_FAILED;
    }

  /* start a quick scan cache on the root class heap, where class records live */
  (void) heap_scancache_quick_start_root_hfid (thread_p, &scan_cache);

  /* PEEK is enough: we only read the HFID out of the record */
  if (heap_get_class_record (thread_p, class_oid, &recdes, &scan_cache, PEEK) != S_SUCCESS)
    {
      heap_scancache_end (thread_p, &scan_cache);
      return ER_FAILED;
    }

  /* NOTE(review): the statement extracting the HFID from recdes into *hfid
   * was lost in extraction here -- confirm against the repository. */

  error_code = heap_scancache_end (thread_p, &scan_cache);
  if (error_code != NO_ERROR)
    {
      return error_code;
    }

  return error_code;
}
22934 
/*
 * heap_hfid_table_entry_alloc() - allocate a new structure for
 *		  the class OID->HFID hash
 *   returns: new pointer or NULL on error
 */
static void *
heap_hfid_table_entry_alloc (void)
{
  /* NOTE(review): the allocation statement declaring new_entry (presumably a
   * malloc of sizeof (HEAP_HFID_TABLE_ENTRY)) was lost in extraction; it must
   * pair with free() in heap_hfid_table_entry_free -- confirm. */
  return (void *) new_entry;
}
22946 
22947 /*
22948  * logtb_global_unique_stat_free () - free a hfid_table entry
22949  * returns: error code or NO_ERROR
22950  * entry(in): entry to free (HEAP_HFID_TABLE_ENTRY)
22951  */
22952 static int
22953 heap_hfid_table_entry_free (void *entry)
22954 {
22955  if (entry != NULL)
22956  {
22957  free (entry);
22958  return NO_ERROR;
22959  }
22960  else
22961  {
22962  return ER_FAILED;
22963  }
22964 }
22965 
22966 /*
22967  * heap_hfid_table_entry_init () - initialize a hfid_table entry
22968  * returns: error code or NO_ERROR
22969  * entry(in): hfid_table entry
22970  */
22971 static int
22972 heap_hfid_table_entry_init (void *entry)
22973 {
22974  HEAP_HFID_TABLE_ENTRY *entry_p = (HEAP_HFID_TABLE_ENTRY *) entry;
22975 
22976  if (entry_p == NULL)
22977  {
22978  return ER_FAILED;
22979  }
22980 
22981  /* initialize fields */
22982  OID_SET_NULL (&entry_p->class_oid);
22983  entry_p->hfid.vfid.fileid = NULL_FILEID;
22984  entry_p->hfid.vfid.volid = NULL_VOLID;
22985  entry_p->hfid.hpgid = NULL_PAGEID;
22986  entry_p->ftype = FILE_UNKNOWN_TYPE;
22987 
22988  return NO_ERROR;
22989 }
22990 
22991 /*
22992  * heap_hfid_table_entry_key_copy () - copy a hfid_table key
22993  * returns: error code or NO_ERROR
22994  * src(in): source
22995  * dest(in): destination
22996  */
22997 static int
22998 heap_hfid_table_entry_key_copy (void *src, void *dest)
22999 {
23000  if (src == NULL || dest == NULL)
23001  {
23002  return ER_FAILED;
23003  }
23004 
23005  COPY_OID ((OID *) dest, (OID *) src);
23006 
23007  /* all ok */
23008  return NO_ERROR;
23009 }
23010 
23011 /*
23012  * heap_hfid_table_entry_key_hash () - hashing function for the class OID->HFID
23013  * hash table
23014  * return: int
23015  * key(in): Session key
23016  * hash_table_size(in): Memory Hash Table Size
23017  *
23018  * Note: Generate a hash number for the given key for the given hash table
23019  * size.
23020  */
23021 static unsigned int
23022 heap_hfid_table_entry_key_hash (void *key, int hash_table_size)
23023 {
23024  return ((unsigned int) OID_PSEUDO_KEY ((OID *) key)) % hash_table_size;
23025 }
23026 
23027 /*
23028  * heap_hfid_table_entry_key_compare () - Compare two global unique
23029  * statistics keys (OIDs)
23030  * return: int (true or false)
23031  * k1 (in) : First OID key
23032  * k2 (in) : Second OID key
23033  */
23034 static int
23035 heap_hfid_table_entry_key_compare (void *k1, void *k2)
23036 {
23037  OID *key1, *key2;
23038 
23039  key1 = (OID *) k1;
23040  key2 = (OID *) k2;
23041 
23042  if (k1 == NULL || k2 == NULL)
23043  {
23044  /* should not happen */
23045  assert (false);
23046  return 0;
23047  }
23048 
23049  if (OID_EQ (key1, key2))
23050  {
23051  /* equal */
23052  return 0;
23053  }
23054  else
23055  {
23056  /* not equal */
23057  return 1;
23058  }
23059 }
23060 
/*
 * heap_initialize_hfid_table () - Creates and initializes global structure
 *				    for global class OID->HFID hash table
 *   return: error code
 *   thread_p  (in) :
 */
int
/* NOTE(review): the signature line was lost in extraction; presumably
 * heap_initialize_hfid_table (void) -- confirm. */
{
  int ret = NO_ERROR;
  LF_ENTRY_DESCRIPTOR *edesc = NULL;

  if (heap_Hfid_table != NULL)
    {
      /* already initialized; calling again is a no-op */
      return NO_ERROR;
    }

  edesc = &heap_Hfid_table_area.hfid_hash_descriptor;

  /* describe the entry layout and callbacks for the lock-free hash table */
  edesc->of_local_next = offsetof (HEAP_HFID_TABLE_ENTRY, stack);
  edesc->of_next = offsetof (HEAP_HFID_TABLE_ENTRY, next);
  edesc->of_del_tran_id = offsetof (HEAP_HFID_TABLE_ENTRY, del_id);
  edesc->of_key = offsetof (HEAP_HFID_TABLE_ENTRY, class_oid);
  edesc->of_mutex = 0;
  /* NOTE(review): one descriptor assignment (between of_mutex and f_alloc,
   * likely the mutex-usage flag) was lost in extraction -- confirm. */
  edesc->f_alloc = heap_hfid_table_entry_alloc;
  edesc->f_free = heap_hfid_table_entry_free;
  edesc->f_init = heap_hfid_table_entry_init;
  edesc->f_uninit = NULL;
  edesc->f_key_copy = heap_hfid_table_entry_key_copy;
  edesc->f_key_cmp = heap_hfid_table_entry_key_compare;
  edesc->f_hash = heap_hfid_table_entry_key_hash;
  edesc->f_duplicate = NULL;

  /* initialize freelist */
  ret = lf_freelist_init (&heap_Hfid_table_area.hfid_hash_freelist, 1, 100, edesc, &hfid_table_Ts);
  if (ret != NO_ERROR)
    {
      return ret;
    }

  /* initialize hash table */
  ret =
    lf_hash_init (&heap_Hfid_table_area.hfid_hash, &heap_Hfid_table_area.hfid_hash_freelist, HEAP_HFID_HASH_SIZE,
		  edesc);
  if (ret != NO_ERROR)
    {
      /* NOTE(review): only the hash is destroyed here; the freelist
       * initialized above appears to be left allocated on this path --
       * verify whether lf_freelist_destroy is needed. */
      lf_hash_destroy (&heap_Hfid_table_area.hfid_hash);
      return ret;
    }

  /* publish the table only after full initialization */
  heap_Hfid_table = &heap_Hfid_table_area;

  return ret;
}
23116 
/*
 * heap_finalize_hfid_table () - Finalize class OID->HFID hash table
 *   return: error code
 *   thread_p  (in) :
 */
void
/* NOTE(review): the signature line was lost in extraction; presumably
 * heap_finalize_hfid_table (void) -- confirm. */
{
  if (heap_Hfid_table != NULL)
    {
      /* destroy hash and freelist */
      lf_hash_destroy (&heap_Hfid_table->hfid_hash);
      lf_freelist_destroy (&heap_Hfid_table->hfid_hash_freelist);

      /* mark as uninitialized so a later heap_initialize_hfid_table can rebuild */
      heap_Hfid_table = NULL;
    }
}
23134 
/*
 * heap_delete_hfid_from_cache () - deletes the entry associated with
 *					the given class OID from the hfid table
 *   return: error code
 *   thread_p  (in) :
 *   class_oid (in) : the class OID for which the entry will be deleted
 */
int
/* NOTE(review): the signature line and the t_entry declaration (lock-free
 * transaction entry for the HFID table) were lost in extraction -- confirm
 * against the repository. */
{
  int error = NO_ERROR;

  /* missing entries are not an error for lf_hash_delete's NULL success arg */
  error = lf_hash_delete (t_entry, &heap_Hfid_table->hfid_hash, class_oid, NULL);
  if (error != NO_ERROR)
    {
      return error;
    }

  return NO_ERROR;
}
23156 
/*
 * heap_vacuum_all_objects () - Vacuum all objects in heap.
 *
 * return	   : Error code.
 * thread_p (in)   : Thread entry.
 * upd_scancache(in) : Update scan cache
 * threshold_mvccid(in) : Threshold MVCCID
 */
int
heap_vacuum_all_objects (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * upd_scancache, MVCCID threshold_mvccid)
{
  PGBUF_WATCHER pg_watcher;	/* currently fixed heap page */
  PGBUF_WATCHER old_pg_watcher;	/* previous page, kept fixed until the next page is fixed */
  VPID next_vpid, vpid;
  VACUUM_WORKER worker;		/* local zero-initialized worker; only carries heap_objects */
  int max_num_slots, i;
  OID temp_oid;
  bool reusable;
  int error_code = NO_ERROR;

  assert (upd_scancache != NULL);
  PGBUF_INIT_WATCHER (&pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, &upd_scancache->node.hfid);
  PGBUF_INIT_WATCHER (&old_pg_watcher, PGBUF_ORDERED_HEAP_NORMAL, &upd_scancache->node.hfid);
  memset (&worker, 0, sizeof (worker));
  /* upper bound of slots any page can hold; size the object array once */
  max_num_slots = IO_MAX_PAGE_SIZE / sizeof (SPAGE_SLOT);
  worker.heap_objects = (VACUUM_HEAP_OBJECT *) malloc (max_num_slots * sizeof (VACUUM_HEAP_OBJECT));
  if (worker.heap_objects == NULL)
    {
      /* NOTE(review): the head of the er_set call reporting
       * ER_OUT_OF_VIRTUAL_MEMORY was lost in extraction. */
      max_num_slots * sizeof (VACUUM_HEAP_OBJECT));
      error_code = ER_OUT_OF_VIRTUAL_MEMORY;
      goto exit;
    }
  worker.heap_objects_capacity = max_num_slots;
  worker.n_heap_objects = 0;

  /* start from the heap header page and walk the page chain */
  next_vpid.volid = upd_scancache->node.hfid.vfid.volid;
  next_vpid.pageid = upd_scancache->node.hfid.hpgid;
  for (i = 0; i < max_num_slots; i++)
    {
      VFID_COPY (&worker.heap_objects[i].vfid, &upd_scancache->node.hfid.vfid);
    }

  reusable = heap_is_reusable_oid (upd_scancache->file_type);
  while (!VPID_ISNULL (&next_vpid))
    {
      vpid = next_vpid;
      error_code = pgbuf_ordered_fix (thread_p, &vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &pg_watcher);
      if (error_code != NO_ERROR)
	{
	  goto exit;
	}

      (void) pgbuf_check_page_ptype (thread_p, pg_watcher.pgptr, PAGE_HEAP);

      /* previous page can be released now that the next one is fixed */
      if (old_pg_watcher.pgptr != NULL)
	{
	  pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
	}

      /* read the next page link before vacuuming the current page */
      error_code = heap_vpid_next (thread_p, &upd_scancache->node.hfid, pg_watcher.pgptr, &next_vpid);
      if (error_code != NO_ERROR)
	{
	  assert (false);
	  goto exit;
	}

      temp_oid.volid = vpid.volid;
      temp_oid.pageid = vpid.pageid;
      /* slot 0 holds the header/chain record; user objects start at slot 1 */
      worker.n_heap_objects = spage_number_of_slots (pg_watcher.pgptr) - 1;
      if (worker.n_heap_objects > 0
	  && heap_page_get_vacuum_status (thread_p, pg_watcher.pgptr) != HEAP_PAGE_VACUUM_NONE)
	{
	  /* collect every slot on the page as a vacuum candidate */
	  for (i = 1; i <= worker.n_heap_objects; i++)
	    {
	      temp_oid.slotid = i;
	      COPY_OID (&worker.heap_objects[i - 1].oid, &temp_oid);
	    }

	  error_code =
	    vacuum_heap_page (thread_p, worker.heap_objects, worker.n_heap_objects, threshold_mvccid,
			      &upd_scancache->node.hfid, &reusable, false);
	  if (error_code != NO_ERROR)
	    {
	      goto exit;
	    }
	}

      /* keep current page fixed (as "old") until the next page is fixed */
      pgbuf_replace_watcher (thread_p, &pg_watcher, &old_pg_watcher);
    }

exit:
  if (pg_watcher.pgptr != NULL)
    {
      pgbuf_ordered_unfix (thread_p, &pg_watcher);
    }
  if (old_pg_watcher.pgptr != NULL)
    {
      pgbuf_ordered_unfix (thread_p, &old_pg_watcher);
    }

  if (worker.heap_objects != NULL)
    {
      free_and_init (worker.heap_objects);
    }
  return error_code;
}
23264 
/*
 * heap_insert_hfid_for_class_oid () - Cache HFID for class object.
 *
 * return	  : Error code.
 * thread_p (in)  : Thread entry.
 * class_oid (in) : Class OID.
 * hfid (in)	  : Heap file ID.
 * ftype (in)	  : FILE_HEAP or FILE_HEAP_REUSE_SLOTS.
 */
int
heap_insert_hfid_for_class_oid (THREAD_ENTRY * thread_p, const OID * class_oid, HFID * hfid, FILE_TYPE ftype)
{
  int error_code = NO_ERROR;
  /* NOTE(review): the declaration of t_entry (lock-free transaction entry for
   * the HFID table) was lost in extraction -- confirm. */
  HEAP_HFID_TABLE_ENTRY *entry = NULL;

  assert (hfid != NULL && !HFID_IS_NULL (hfid));
  assert (ftype == FILE_HEAP || ftype == FILE_HEAP_REUSE_SLOTS);

  if (class_oid == NULL || OID_ISNULL (class_oid))
    {
      /* We can't cache it. */
      return NO_ERROR;
    }

  /* find or create the cache entry for this class OID */
  error_code =
    lf_hash_find_or_insert (t_entry, &heap_Hfid_table->hfid_hash, (void *) class_oid, (void **) &entry, NULL);
  if (error_code != NO_ERROR)
    {
      return error_code;
    }
  assert (entry != NULL);
  /* entry must not already hold a cached HFID */
  assert (entry->hfid.hpgid == NULL_PAGEID);

  HFID_COPY (&entry->hfid, hfid);
  entry->ftype = ftype;
  lf_tran_end_with_mb (t_entry);

  /* Successfully cached. */
  return NO_ERROR;
}
23306 
/*
 * heap_hfid_cache_get () - returns the HFID of the
 *			class with the given class OID
 *   return: error code
 *   thread_p  (in) :
 *   class OID (in) : the class OID for which the entry will be returned
 *   hfid_out (out):
 *   ftype_out (out): cached file type (FILE_HEAP or FILE_HEAP_REUSE_SLOTS)
 *
 *  Note: if the entry is not found, one will be inserted and the HFID is
 *	retrieved from the class record.
 */
static int
heap_hfid_cache_get (THREAD_ENTRY * thread_p, const OID * class_oid, HFID * hfid_out, FILE_TYPE * ftype_out)
{
  int error_code = NO_ERROR;
  /* NOTE(review): the declaration of t_entry (lock-free transaction entry for
   * the HFID table) was lost in extraction -- confirm. */
  HEAP_HFID_TABLE_ENTRY *entry = NULL;

  assert (class_oid != NULL && !OID_ISNULL (class_oid));

  /* find or create cache entry; a fresh entry has all HFID fields NULL */
  error_code =
    lf_hash_find_or_insert (t_entry, &heap_Hfid_table->hfid_hash, (void *) class_oid, (void **) &entry, NULL);
  if (error_code != NO_ERROR)
    {
      ASSERT_ERROR ();
      return error_code;
    }
  assert (entry != NULL);


  if (entry->hfid.hpgid == NULL_PAGEID || entry->hfid.vfid.fileid == NULL_FILEID
      || entry->hfid.vfid.volid == NULL_VOLID)
    {
      HFID hfid_local = HFID_INITIALIZER;

      /* root HFID should already be added. */
      if (OID_IS_ROOTOID (class_oid))
	{
	  assert_release (false);
	  boot_find_root_heap (&entry->hfid);
	  entry->ftype = FILE_HEAP;
	  lf_tran_end_with_mb (t_entry);
	  return NO_ERROR;
	}

      /* this is either a newly inserted entry or one with incomplete information that is currently being filled by
       * another transaction. We need to retrieve the HFID from the class record. We do not care that we are
       * overwriting the information, since it must be always the same (the HFID never changes for the same class OID). */
      error_code = heap_get_hfid_from_class_record (thread_p, class_oid, &hfid_local);
      if (error_code != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  lf_tran_end_with_mb (t_entry);
	  return error_code;
	}
      entry->hfid = hfid_local;
    }
  assert (entry->hfid.hpgid != NULL_PAGEID && entry->hfid.vfid.fileid != NULL_FILEID
	  && entry->hfid.vfid.volid != NULL_VOLID);
  if (entry->ftype == FILE_UNKNOWN_TYPE)
    {
      /* file type not cached yet; read it from the file header */
      FILE_TYPE ftype_local;
      error_code = file_get_type (thread_p, &entry->hfid.vfid, &ftype_local);
      if (error_code != NO_ERROR)
	{
	  ASSERT_ERROR ();
	  lf_tran_end_with_mb (t_entry);
	  return error_code;
	}
      entry->ftype = ftype_local;
    }
  assert (entry->ftype == FILE_HEAP || entry->ftype == FILE_HEAP_REUSE_SLOTS);

  if (hfid_out != NULL)
    {
      *hfid_out = entry->hfid;
    }
  if (ftype_out != NULL)
    {
      *ftype_out = entry->ftype;
    }

  lf_tran_end_with_mb (t_entry);
  return error_code;
}
23392 
/*
 * heap_page_update_chain_after_mvcc_op () - Update max MVCCID and vacuum
 *					     status in heap page chain after
 *					     an MVCC op is executed.
 *
 * return	  : Void.
 * thread_p (in)  : Thread entry.
 * heap_page (in) : Heap page.
 * mvccid (in)	  : MVCC op MVCCID.
 */
static void
heap_page_update_chain_after_mvcc_op (THREAD_ENTRY * thread_p, PAGE_PTR heap_page, MVCCID mvccid)
{
  HEAP_CHAIN *chain;
  RECDES chain_recdes;
  HEAP_PAGE_VACUUM_STATUS vacuum_status;

  assert (heap_page != NULL);
  assert (MVCCID_IS_NORMAL (mvccid));

  /* Two actions are being done here: 1. Update vacuum status. - HEAP_PAGE_VACUUM_NONE + 1 mvcc op =>
   * HEAP_PAGE_VACUUM_ONCE - HEAP_PAGE_VACUUM_ONCE + 1 mvcc op => HEAP_PAGE_VACUUM_UNKNOWN (because future becomes
   * unpredictable). - HEAP_PAGE_VACUUM_UNKNOWN + 1 mvcc op can we tell that page is vacuumed? =>
   * HEAP_PAGE_VACUUM_ONCE we don't know that page is vacuumed? => HEAP_PAGE_VACUUM_UNKNOWN 2. Update max MVCCID if
   * new MVCCID is bigger. */

  /* Get heap chain. */
  if (spage_get_record (thread_p, heap_page, HEAP_HEADER_AND_CHAIN_SLOTID, &chain_recdes, PEEK) != S_SUCCESS)
    {
      assert_release (false);
      return;
    }
  if (chain_recdes.length != sizeof (HEAP_CHAIN))
    {
      /* Heap header page. Do nothing. */
      assert (chain_recdes.length == sizeof (HEAP_HDR_STATS));
      return;
    }
  chain = (HEAP_CHAIN *) chain_recdes.data;

  /* Update vacuum status. */
  vacuum_status = HEAP_PAGE_GET_VACUUM_STATUS (chain);
  switch (vacuum_status)
    {
    case HEAP_PAGE_VACUUM_NONE:
      /* Change status to one vacuum. */
      assert (MVCC_ID_PRECEDES (chain->max_mvccid, mvccid));
      /* NOTE(review): the status-setting macro call and the head of the
       * vacuum_er_log call were lost in extraction here. */
		     "Changed vacuum status for page %d|%d, lsa=%lld|%d from no vacuum to vacuum once.",
		     PGBUF_PAGE_STATE_ARGS (heap_page));
      break;

    case HEAP_PAGE_VACUUM_ONCE:
      /* Change status to unknown number of vacuums. */
      /* NOTE(review): the status-setting macro call and the head of the
       * vacuum_er_log call were lost in extraction here. */
		     "Changed vacuum status for page %d|%d, lsa=%lld|%d from vacuum once to unknown.",
		     PGBUF_PAGE_STATE_ARGS (heap_page));
      break;

      /* NOTE(review): the HEAP_PAGE_VACUUM_UNKNOWN case label and its
       * condition test were lost in extraction. */
      /* Was page completely vacuumed? We can tell if current max_mvccid precedes vacuum data's oldest mvccid. */
      {
	/* Now page must be vacuumed once, due to new MVCC op. */
	/* NOTE(review): status-setting macro and vacuum_er_log head lost in
	 * extraction. */
		       "Changed vacuum status for page %d|%d, lsa=%lld|%d from unknown to vacuum once.",
		       PGBUF_PAGE_STATE_ARGS (heap_page));
      }
      else
	{
	  /* Status remains the same. Number of vacuums needed still cannot be predicted. */
	  vacuum_er_log (VACUUM_ER_LOG_HEAP, "Vacuum status for page %d|%d, %lld|%d remains unknown.",
			 PGBUF_PAGE_STATE_ARGS (heap_page));
	}
      break;
    default:
      assert_release (false);
      break;
    }

  /* Update max_mvccid. */
  if (MVCC_ID_PRECEDES (chain->max_mvccid, mvccid))
    {
      vacuum_er_log (VACUUM_ER_LOG_HEAP, "Update max MVCCID for page %d|%d from %llu to %llu.",
		     PGBUF_PAGE_VPID_AS_ARGS (heap_page), (unsigned long long int) chain->max_mvccid,
		     (unsigned long long int) mvccid);
      chain->max_mvccid = mvccid;
    }
}
23485 
/*
 * heap_page_rv_chain_update () - Applies max MVCCID and vacuum status change
 *				  to a heap page chain during recovery.
 *
 * return	  : Void.
 * thread_p (in)  : Thread entry.
 * heap_page (in) : Heap page.
 * mvccid (in)	  : MVCCID to fold into the chain's max_mvccid.
 * vacuum_status_change (in) : true when the logged operation also changed
 *			       the page's vacuum status.
 */
static void
heap_page_rv_chain_update (THREAD_ENTRY * thread_p, PAGE_PTR heap_page, MVCCID mvccid, bool vacuum_status_change)
{
  HEAP_CHAIN *chain;
  RECDES chain_recdes;
  HEAP_PAGE_VACUUM_STATUS vacuum_status;

  assert (heap_page != NULL);

  /* Possible transitions (see heap_page_update_chain_after_mvcc_op): - HEAP_PAGE_VACUUM_NONE => HEAP_PAGE_VACUUM_ONCE.
   * - HEAP_PAGE_VACUUM_ONCE => HEAP_PAGE_VACUUM_UNKNOWN. - HEAP_PAGE_VACUUM_UNKNOWN => HEAP_PAGE_VACUUM_ONCE. */

  /* Get heap chain. */
  if (spage_get_record (thread_p, heap_page, HEAP_HEADER_AND_CHAIN_SLOTID, &chain_recdes, PEEK) != S_SUCCESS)
    {
      assert_release (false);
      return;
    }
  if (chain_recdes.length != sizeof (HEAP_CHAIN))
    {
      /* Header page. Don't change chain. */
      return;
    }
  chain = (HEAP_CHAIN *) chain_recdes.data;

  if (vacuum_status_change)
    {
      /* Change status. */
      vacuum_status = HEAP_PAGE_GET_VACUUM_STATUS (chain);
      switch (vacuum_status)
	{
	case HEAP_PAGE_VACUUM_NONE:
	  /* NOTE(review): the HEAP_PAGE_VACUUM_UNKNOWN case label, the
	   * status-setting macro call and the head of the vacuum_er_log call
	   * were lost in extraction here. */

			 "Change heap page %d|%d, lsa=%lld|%d, status from %s to once.",
			 PGBUF_PAGE_STATE_ARGS (heap_page),
			 vacuum_status == HEAP_PAGE_VACUUM_NONE ? "none" : "unknown");
	  break;
	case HEAP_PAGE_VACUUM_ONCE:
	  /* NOTE(review): the status-setting macro call and the head of the
	   * vacuum_er_log call were lost in extraction here. */

			 "Change heap page %d|%d, lsa=%lld|%d, status from once to unknown.",
			 PGBUF_PAGE_STATE_ARGS (heap_page));
	  break;
	}
    }
  /* fold the logged MVCCID into the page's running maximum */
  if (MVCC_ID_PRECEDES (chain->max_mvccid, mvccid))
    {
      chain->max_mvccid = mvccid;
    }
}
23548 
/*
 * heap_page_set_vacuum_status_none () - Change vacuum status from one vacuum
 *					 required to none.
 *
 * return	  : Void.
 * thread_p (in)  : Thread entry.
 * heap_page (in) : Heap page.
 */
void
/* NOTE(review): the signature line was lost in extraction; presumably
 * heap_page_set_vacuum_status_none (THREAD_ENTRY * thread_p, PAGE_PTR heap_page)
 * -- confirm. */
{
  HEAP_CHAIN *chain;
  RECDES chain_recdes;

  assert (heap_page != NULL);

  /* Updating vacuum status: - HEAP_PAGE_VACUUM_NONE => Vacuum is not expected. Fail. - HEAP_PAGE_VACUUM_ONCE + 1
   * vacuum => HEAP_PAGE_VACUUM_NONE. - HEAP_PAGE_VACUUM_UNKNOWN + 1 vacuum => HEAP_PAGE_VACUUM_UNKNOWN. Number of
   * vacuums expected is unknown and remains that way. */

  /* Get heap chain. */
  if (spage_get_record (thread_p, heap_page, HEAP_HEADER_AND_CHAIN_SLOTID, &chain_recdes, PEEK) != S_SUCCESS)
    {
      assert_release (false);
      return;
    }
  if (chain_recdes.length != sizeof (HEAP_CHAIN))
    {
      /* Heap header page. */
      /* Should never be here. */
      assert_release (false);
      return;
    }
  chain = (HEAP_CHAIN *) chain_recdes.data;

  /* NOTE(review): an assertion on the current status was lost in
   * extraction here. */

  /* Update vacuum status. */
  /* NOTE(review): the status-setting macro call was lost in extraction. */

  vacuum_er_log (VACUUM_ER_LOG_HEAP, "Changed vacuum status for page %d|%d from vacuum once to no vacuum.",
		 PGBUF_PAGE_VPID_AS_ARGS (heap_page));
}
23592 
/*
 * heap_page_get_max_mvccid () - Get max MVCCID of heap page.
 *
 * return	  : Max MVCCID, or MVCCID_NULL when the page has no chain
 *		    record (header page or read failure).
 * thread_p (in)  : Thread entry.
 * heap_page (in) : Heap page.
 */
MVCCID
/* NOTE(review): the signature line was lost in extraction; presumably
 * heap_page_get_max_mvccid (THREAD_ENTRY * thread_p, PAGE_PTR heap_page)
 * -- confirm. */
{
  HEAP_CHAIN *chain;
  RECDES chain_recdes;

  assert (heap_page != NULL);

  /* Get heap chain. */
  if (spage_get_record (thread_p, heap_page, HEAP_HEADER_AND_CHAIN_SLOTID, &chain_recdes, PEEK) != S_SUCCESS)
    {
      assert_release (false);
      return MVCCID_NULL;
    }
  if (chain_recdes.length != sizeof (HEAP_CHAIN))
    {
      /* Heap header page. */
      assert (chain_recdes.length == sizeof (HEAP_HDR_STATS));
      return MVCCID_NULL;
    }
  chain = (HEAP_CHAIN *) chain_recdes.data;

  return chain->max_mvccid;
}
23624 
23625 /*
23626  * heap_page_get_vacuum_status () - Get heap page vacuum status.
23627  *
23628  * return : Vacuum status.
23629  * thread_p (in) : Thread entry.
23630  * heap_page (in) : Heap page.
23631  */
23634 {
23635  HEAP_CHAIN *chain;
23636  RECDES chain_recdes;
23637 
23638  assert (heap_page != NULL);
23639 
23640  /* Get heap chain. */
23641  if (spage_get_record (thread_p, heap_page, HEAP_HEADER_AND_CHAIN_SLOTID, &chain_recdes, PEEK) != S_SUCCESS)
23642  {
23643  assert_release (false);
23644  return HEAP_PAGE_VACUUM_UNKNOWN;
23645  }
23646  if (chain_recdes.length != sizeof (HEAP_CHAIN))
23647  {
23648  /* Heap header page. */
23649  assert (chain_recdes.length == sizeof (HEAP_HDR_STATS));
23650  return HEAP_PAGE_VACUUM_UNKNOWN;
23651  }
23652  chain = (HEAP_CHAIN *) chain_recdes.data;
23653 
23654  return HEAP_PAGE_GET_VACUUM_STATUS (chain);
23655 }
23656 
23657 /*
23658  * heap_rv_nop () - Heap recovery no op function.
23659  *
23660  * return : NO_ERROR.
23661  * thread_p (in) : Thread entry.
23662  * rcv (in) : Recovery data.
23663  */
23664 int
23665 heap_rv_nop (THREAD_ENTRY * thread_p, LOG_RCV * rcv)
23666 {
23667  assert (rcv->pgptr != NULL);
23668  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
23669 
23670  return NO_ERROR;
23671 }
23672 
23673 /*
23674  * heap_rv_update_chain_after_mvcc_op () - Redo update of page chain after
23675  * an MVCC operation (used for
23676  * operations that are not changing
23677  *
23678  *
23679  * return : NO_ERROR
23680  * thread_p (in) : Thread entry.
23681  * rcv (in) : Recovery data.
23682  */
23683 int
23685 {
23686  bool vacuum_status_change = false;
23687 
23688  assert (rcv->pgptr != NULL);
23689  assert (MVCCID_IS_NORMAL (rcv->mvcc_id));
23690 
23691  vacuum_status_change = (rcv->offset & HEAP_RV_FLAG_VACUUM_STATUS_CHANGE) != 0;
23692  heap_page_rv_chain_update (thread_p, rcv->pgptr, rcv->mvcc_id, vacuum_status_change);
23693  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
23694  return NO_ERROR;
23695 }
23696 
23697 /*
23698  * heap_rv_remove_flags_from_offset () - Remove flags from recovery offset.
23699  *
23700  * return : Offset without flags.
23701  * offset (in) : Offset with flags.
23702  */
23703 INT16
23705 {
23707 }
23708 
23709 /*
23710  * heap_should_try_update_stat () - checks if an heap update statistics is
23711  * indicated
23712  *
23713  *
23714  * return : NO_ERROR
23715  * thread_p (in) : Thread entry.
23716  * rcv (in) : Recovery data.
23717  */
23718 bool
23719 heap_should_try_update_stat (const int current_freespace, const int prev_freespace)
23720 {
23721  if (current_freespace > prev_freespace && current_freespace > HEAP_DROP_FREE_SPACE
23722  && prev_freespace < HEAP_DROP_FREE_SPACE)
23723  {
23724  return true;
23725  }
23726  return false;
23727 }
23728 
23729 /*
23730  * heap_scancache_add_partition_node () - add a new partition information to
23731  * to the scan_cache's partition list.
23732  * Also sets the current node of the
23733  * scancache to this newly inserted node.
23734  *
23735  * return : error code
23736  * thread_p (in) :
23737  * scan_cache (in) :
23738  * partition_oid (in) :
23739  */
23740 static int
23741 heap_scancache_add_partition_node (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache, OID * partition_oid)
23742 {
23743  HFID hfid;
23745 
23746  assert (scan_cache != NULL);
23747 
23748  if (heap_get_hfid_from_class_oid (thread_p, partition_oid, &hfid) != NO_ERROR)
23749  {
23750  return ER_FAILED;
23751  }
23752 
23753  new_ = (HEAP_SCANCACHE_NODE_LIST *) db_private_alloc (thread_p, sizeof (HEAP_SCANCACHE_NODE_LIST));
23754  if (new_ == NULL)
23755  {
23757  return ER_OUT_OF_VIRTUAL_MEMORY;
23758  }
23759 
23760  COPY_OID (&new_->node.class_oid, partition_oid);
23761  HFID_COPY (&new_->node.hfid, &hfid);
23762  if (scan_cache->partition_list == NULL)
23763  {
23764  new_->next = NULL;
23765  scan_cache->partition_list = new_;
23766  }
23767  else
23768  {
23769  new_->next = scan_cache->partition_list;
23770  scan_cache->partition_list = new_;
23771  }
23772 
23773  /* set the new node as the current node */
23774  HEAP_SCANCACHE_SET_NODE (scan_cache, partition_oid, &hfid);
23775 
23776  return NO_ERROR;
23777 }
23778 
23779 /*
23780  * heap_mvcc_log_redistribute () - Log partition redistribute data
23781  *
23782  * return : Void.
23783  * thread_p (in) : Thread entry.
23784  * p_recdes (in) : Newly inserted record.
23785  * p_addr (in) : Log address data.
23786  */
23787 static void
23788 heap_mvcc_log_redistribute (THREAD_ENTRY * thread_p, RECDES * p_recdes, LOG_DATA_ADDR * p_addr)
23789 {
23790 #define HEAP_LOG_MVCC_REDISTRIBUTE_MAX_REDO_CRUMBS 4
23791 
23792  int n_redo_crumbs = 0, data_copy_offset = 0;
23794  MVCCID delid;
23796  HEAP_PAGE_VACUUM_STATUS vacuum_status;
23797 
23798  assert (p_recdes != NULL);
23799  assert (p_addr != NULL);
23800 
23801  vacuum_status = heap_page_get_vacuum_status (thread_p, p_addr->pgptr);
23802 
23803  /* Update chain. */
23804  heap_page_update_chain_after_mvcc_op (thread_p, p_addr->pgptr, logtb_get_current_mvccid (thread_p));
23805  if (vacuum_status != heap_page_get_vacuum_status (thread_p, p_addr->pgptr))
23806  {
23807  /* Mark status change for recovery. */
23809  }
23810 
23811  /* Build redo crumbs */
23812  /* Add record type */
23813  redo_crumbs[n_redo_crumbs].length = sizeof (p_recdes->type);
23814  redo_crumbs[n_redo_crumbs++].data = &p_recdes->type;
23815 
23816  if (p_recdes->type != REC_BIGONE)
23817  {
23818  or_mvcc_get_header (p_recdes, &mvcc_rec_header);
23820 
23821  /* Add representation ID and flags field */
23822  redo_crumbs[n_redo_crumbs].length = OR_INT_SIZE;
23823  redo_crumbs[n_redo_crumbs++].data = p_recdes->data;
23824 
23825  redo_crumbs[n_redo_crumbs].length = OR_MVCCID_SIZE;
23826  redo_crumbs[n_redo_crumbs++].data = &delid;
23827 
23828  /* Set data copy offset after the record header */
23829  data_copy_offset = OR_HEADER_SIZE (p_recdes->data);
23830  }
23831 
23832  /* Add record data - record may be skipped if the record is not big one */
23833  redo_crumbs[n_redo_crumbs].length = p_recdes->length - data_copy_offset;
23834  redo_crumbs[n_redo_crumbs++].data = p_recdes->data + data_copy_offset;
23835 
23836  /* Safe guard */
23838 
23839  /* Append redo crumbs; undo crumbs not necessary as the spage_delete physical operation uses the offset field of the
23840  * address */
23841  log_append_undoredo_crumbs (thread_p, RVHF_MVCC_REDISTRIBUTE, p_addr, 0, n_redo_crumbs, NULL, redo_crumbs);
23842 }
23843 
23844 /*
23845  * heap_rv_mvcc_redo_redistribute () - Redo the MVCC redistribute partition data
23846  * return: int
23847  * rcv(in): Recovery structure
23848  *
23849  */
23850 int
23852 {
23853  INT16 slotid;
23854  RECDES recdes;
23855  int sp_success;
23856  MVCCID delid;
23858  INT16 record_type;
23859  bool vacuum_status_change = false;
23860 
23861  assert (rcv->pgptr != NULL);
23862 
23863  slotid = rcv->offset;
23864  if (slotid & HEAP_RV_FLAG_VACUUM_STATUS_CHANGE)
23865  {
23866  vacuum_status_change = true;
23867  }
23868  slotid = slotid & (~HEAP_RV_FLAG_VACUUM_STATUS_CHANGE);
23869  assert (slotid > 0);
23870 
23871  record_type = *(INT16 *) rcv->data;
23872  if (record_type == REC_BIGONE)
23873  {
23874  /* no data header */
23875  HEAP_SET_RECORD (&recdes, rcv->length - sizeof (record_type), rcv->length - sizeof (record_type), REC_BIGONE,
23876  rcv->data + sizeof (record_type));
23877  }
23878  else
23879  {
23881  int repid_and_flags, offset, mvcc_flag, offset_size;
23882 
23883  offset = sizeof (record_type);
23884 
23885  repid_and_flags = OR_GET_INT (rcv->data + offset);
23886  offset += OR_INT_SIZE;
23887 
23888  OR_GET_MVCCID (rcv->data + offset, &delid);
23890 
23891  mvcc_flag = (char) ((repid_and_flags >> OR_MVCC_FLAG_SHIFT_BITS) & OR_MVCC_FLAG_MASK);
23892 
23893  if ((repid_and_flags & OR_OFFSET_SIZE_FLAG) == OR_OFFSET_SIZE_1BYTE)
23894  {
23895  offset_size = OR_BYTE_SIZE;
23896  }
23897  else if ((repid_and_flags & OR_OFFSET_SIZE_FLAG) == OR_OFFSET_SIZE_2BYTE)
23898  {
23899  offset_size = OR_SHORT_SIZE;
23900  }
23901  else
23902  {
23903  offset_size = OR_INT_SIZE;
23904  }
23905 
23906  MVCC_SET_REPID (&mvcc_rec_header, repid_and_flags & OR_MVCC_REPID_MASK);
23907  MVCC_SET_FLAG (&mvcc_rec_header, mvcc_flag);
23909  MVCC_SET_DELID (&mvcc_rec_header, delid);
23910 
23912  PTR_ALIGN (data_buffer, MAX_ALIGNMENT));
23913  or_mvcc_add_header (&recdes, &mvcc_rec_header, repid_and_flags & OR_BOUND_BIT_FLAG, offset_size);
23914 
23915  memcpy (recdes.data + recdes.length, rcv->data + offset, rcv->length - offset);
23916  recdes.length += (rcv->length - offset);
23917  }
23918 
23919  sp_success = spage_insert_for_recovery (thread_p, rcv->pgptr, slotid, &recdes);
23920 
23921  if (sp_success != SP_SUCCESS)
23922  {
23923  /* Unable to redo insertion */
23924  assert_release (false);
23925  return ER_FAILED;
23926  }
23927 
23928  heap_page_rv_chain_update (thread_p, rcv->pgptr, rcv->mvcc_id, vacuum_status_change);
23929  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);
23930 
23931  return NO_ERROR;
23932 }
23933 
23934 /*
23935  * heap_get_visible_version_from_log () - Iterate through old versions of object until a visible object is found
23936  *
23937  * return: SCAN_CODE. Possible values:
23938  * - S_SUCCESS: for successful case when record was obtained.
23939  * - S_DOESNT_EXIT: NULL LSA was provided, otherwise a visible version should exist
23940  * - S_DOESNT_FIT: the record doesn't fit in allocated area
23941  * - S_ERROR: In case of error
23942  * thread_p (in): Thread entry.
23943  * recdes (out): Record descriptor.
23944  * previous_version_lsa (in): Log address of previous version.
23945  * scan_cache(in): Heap scan cache.
23946  */
static SCAN_CODE
heap_get_visible_version_from_log (THREAD_ENTRY * thread_p, RECDES * recdes, LOG_LSA * previous_version_lsa,
				   HEAP_SCANCACHE * scan_cache, int has_chn)
{
  LOG_LSA process_lsa;
  SCAN_CODE scan_code = S_SUCCESS;
  char log_pgbuf[IO_MAX_PAGE_SIZE + MAX_ALIGNMENT];
  LOG_PAGE *log_page_p = NULL;
  MVCC_REC_HEADER mvcc_header;
  RECDES local_recdes;
  MVCC_SATISFIES_SNAPSHOT_RESULT snapshot_res;
  LOG_LSA oldest_prior_lsa;

  assert (scan_cache != NULL);
  assert (scan_cache->mvcc_snapshot != NULL);

  /* Caller may pass recdes == NULL when only visibility matters; use a local
   * descriptor so the loop below still has something to read into. */
  if (recdes == NULL)
    {
      recdes = &local_recdes;
      recdes->data = NULL;
    }

  /* make sure prev_version_lsa is flushed from prior lsa list - wake up log flush thread if it's not flushed */
  oldest_prior_lsa = *log_get_append_lsa ();	/* TODO: fix atomicity issue on x86 */
  if (LSA_LT (&oldest_prior_lsa, previous_version_lsa))
    {
      LOG_CS_ENTER (thread_p);
      /* NOTE(review): a statement appears to be missing here (presumably the
       * call that appends/flushes the prior LSA list) — confirm against the
       * repository copy of this file. */
      LOG_CS_EXIT (thread_p);

      oldest_prior_lsa = *log_get_append_lsa ();
      assert (!LSA_LT (&oldest_prior_lsa, previous_version_lsa));
    }

  if (recdes->data == NULL)
    {
      if (scan_cache->area == NULL)
	{
	  /* Allocate an area to hold the object. Assume that the object will fit in two pages for not better
	   * estimates. */
	  scan_cache->area_size = DB_PAGESIZE * 2;
	  scan_cache->area = (char *) db_private_alloc (thread_p, scan_cache->area_size);
	  if (scan_cache->area == NULL)
	    {
	      /* NOTE(review): an er_set for ER_OUT_OF_VIRTUAL_MEMORY appears
	       * to be missing here — confirm against the repository. */
	      scan_cache->area_size = -1;
	      return S_ERROR;
	    }
	}
      recdes->data = scan_cache->area;
      recdes->area_size = scan_cache->area_size;
    }

  /* check visibility of old versions from log following prev_version_lsa links */
  for (LSA_COPY (&process_lsa, previous_version_lsa); !LSA_ISNULL (&process_lsa);)
    {
      /* Fetch the page where prev_vesion_lsa is located */
      log_page_p = (LOG_PAGE *) PTR_ALIGN (log_pgbuf, MAX_ALIGNMENT);
      log_page_p->hdr.logical_pageid = NULL_PAGEID;
      log_page_p->hdr.offset = NULL_OFFSET;
      if (logpb_fetch_page (thread_p, &process_lsa, LOG_CS_SAFE_READER, log_page_p) != NO_ERROR)
	{
	  assert (false);
	  logpb_fatal_error (thread_p, true, ARG_FILE_LINE, "heap_get_visible_version_from_log");
	  return S_ERROR;
	}

      /* Read the undo record (the older version image) at process_lsa. */
      scan_code = log_get_undo_record (thread_p, log_page_p, process_lsa, recdes);
      if (scan_code != S_SUCCESS)
	{
	  if (scan_code == S_DOESNT_FIT && recdes->data == scan_cache->area)
	    {
	      /* expand record area and try again; log_get_undo_record reports
	       * the required size as a negative length in recdes->length */
	      recdes->data = (char *) db_private_realloc (thread_p, scan_cache->area, -recdes->length);
	      if (recdes->data == NULL)
		{
		  /* NOTE(review): an er_set for ER_OUT_OF_VIRTUAL_MEMORY
		   * appears to be missing here — confirm against repository.
		   * Also note scan_cache->area is left dangling on this
		   * path (realloc-overwrite); flagging for follow-up. */
		  return S_ERROR;
		}
	      recdes->area_size = scan_cache->area_size = -recdes->length;
	      scan_cache->area = recdes->data;

	      /* final try to get the undo record */
	      continue;
	    }
	  else
	    {
	      return scan_code;
	    }
	}

      if (or_mvcc_get_header (recdes, &mvcc_header) != NO_ERROR)
	{
	  assert (false);
	  /* NOTE(review): an er_set call appears to be missing here — confirm
	   * against the repository. */
	  return S_ERROR;
	}
      /* Test this version against the caller's snapshot. */
      snapshot_res = scan_cache->mvcc_snapshot->snapshot_fnc (thread_p, &mvcc_header, scan_cache->mvcc_snapshot);
      if (snapshot_res == SNAPSHOT_SATISFIED)
	{
	  /* Visible. Get record if CHN was changed. */
	  if (MVCC_IS_CHN_UPTODATE (&mvcc_header, has_chn))
	    {
	      return S_SUCCESS_CHN_UPTODATE;
	    }
	  return S_SUCCESS;
	}
      else if (snapshot_res == TOO_OLD_FOR_SNAPSHOT)
	{
	  /* Versions in the undo chain only get older; an already-too-old
	   * version here means the chain is inconsistent. */
	  assert (false);
	  /* NOTE(review): an er_set call appears to be missing here — confirm
	   * against the repository. */
	  return S_ERROR;
	}
      else
	{
	  /* TOO_NEW_FOR_SNAPSHOT */
	  assert (snapshot_res == TOO_NEW_FOR_SNAPSHOT);
	  /* continue with previous version */
	  LSA_COPY (&process_lsa, &MVCC_GET_PREV_VERSION_LSA (&mvcc_header));
	  continue;
	}
    }

  /* No visible version found. */
  return S_DOESNT_EXIST;
}
24073 
24074 
24075 /*
24076  * heap_get_visible_version () - get visible version, mvcc style when snapshot provided, otherwise directly from heap
24077  *
24078  * return: SCAN_CODE. Posible values:
24079  * - S_SUCCESS: for successful case when record was obtained.
24080  * - S_DOESNT_EXIT:
24081  * - S_DOESNT_FIT: the record doesn't fit in allocated area
24082  * - S_ERROR: In case of error
24083  * - S_SNAPSHOT_NOT_SATISFIED
24084  * - S_SUCCESS_CHN_UPTODATE: CHN is up to date and it's not necessary to get record again
24085  * thread_p (in): Thread entry.
24086  * oid (in): Object to be obtained.
24087  * class_oid (in):
24088  * recdes (out): Record descriptor. NULL if not needed
24089  * scan_cache(in): Heap scan cache.
24090  * ispeeking(in): Peek record or copy.
24091  * old_chn (in): Cache coherency number for existing record data. It is
24092  * used by clients to avoid resending record data when
24093  * it was not updated.
24094  * Note: this function should not be used for heap scan;
24095  */
24096 SCAN_CODE
24097 heap_get_visible_version (THREAD_ENTRY * thread_p, const OID * oid, OID * class_oid, RECDES * recdes,
24098  HEAP_SCANCACHE * scan_cache, int ispeeking, int old_chn)
24099 {
24100  SCAN_CODE scan = S_SUCCESS;
24101  HEAP_GET_CONTEXT context;
24102 
24103  heap_init_get_context (thread_p, &context, oid, class_oid, recdes, scan_cache, ispeeking, old_chn);
24104 
24105  scan = heap_get_visible_version_internal (thread_p, &context, false);
24106 
24107  heap_clean_get_context (thread_p, &context);
24108 
24109  return scan;
24110 }
24111 
24112 /*
24113 * heap_scan_get_visible_version () - get visible version, mvcc style when snapshot provided, otherwise directly from heap
24114 *
24115 * return: SCAN_CODE. Posible values:
24116 * - S_SUCCESS: for successful case when record was obtained.
24117 * - S_DOESNT_EXIT:
24118 * - S_DOESNT_FIT: the record doesn't fit in allocated area
24119 * - S_ERROR: In case of error
24120 * - S_SNAPSHOT_NOT_SATISFIED
24121 * - S_SUCCESS_CHN_UPTODATE: CHN is up to date and it's not necessary to get record again
24122 * thread_p (in): Thread entry.
24123 * oid (in): Object to be obtained.
24124 * class_oid (in):
24125 * recdes (out): Record descriptor. NULL if not needed
24126 * scan_cache(in): Heap scan cache.
24127 * ispeeking(in): Peek record or copy.
24128 * old_chn (in): Cache coherency number for existing record data. It is
24129 * used by clients to avoid resending record data when
24130 * it was not updated.
24131 * Note: this function should be used for heap scan;
24132 */
24133 SCAN_CODE
24134 heap_scan_get_visible_version (THREAD_ENTRY * thread_p, const OID * oid, OID * class_oid, RECDES * recdes,
24135  HEAP_SCANCACHE * scan_cache, int ispeeking, int old_chn)
24136 {
24137  SCAN_CODE scan = S_SUCCESS;
24138  HEAP_GET_CONTEXT context;
24139 
24140  heap_init_get_context (thread_p, &context, oid, class_oid, recdes, scan_cache, ispeeking, old_chn);
24141 
24142  scan = heap_get_visible_version_internal (thread_p, &context, true);
24143 
24144  heap_clean_get_context (thread_p, &context);
24145 
24146  return scan;
24147 }
24148 
24149 /*
24150  * heap_get_visible_version_internal () - Retrieve the visible version of an object according to snapshot
24151  *
24152  * return SCAN_CODE.
24153  * thread_p (in): Thread entry.
24154  * context (in): Heap get context.
24155  * is_heap_scan (in): required for heap_prepare_get_context
24156  */
SCAN_CODE
heap_get_visible_version_internal (THREAD_ENTRY * thread_p, HEAP_GET_CONTEXT * context, bool is_heap_scan)
{
  SCAN_CODE scan;

  /* NOTE(review): declarations appear to be missing here (presumably
   * MVCC_REC_HEADER mvcc_header and MVCC_SNAPSHOT *mvcc_snapshot = NULL,
   * which the code below uses) — confirm against the repository. */
  OID class_oid_local = OID_INITIALIZER;

  assert (context->scan_cache != NULL);

  if (context->class_oid_p == NULL)
    {
      /* we need class_oid to check if the class is mvcc enabled */
      context->class_oid_p = &class_oid_local;
    }

  if (context->scan_cache && context->ispeeking == COPY && context->recdes_p != NULL)
    {
      /* Allocate an area to hold the object. Assume that the object will fit in two pages for not better estimates. */
      if (heap_scan_cache_allocate_area (thread_p, context->scan_cache, DB_PAGESIZE * 2) != NO_ERROR)
	{
	  return S_ERROR;
	}
    }

  /* Fix the object's page(s) and identify the record type/forward oid. */
  scan = heap_prepare_get_context (thread_p, context, is_heap_scan, LOG_WARNING_IF_DELETED);
  if (scan != S_SUCCESS)
    {
      goto exit;
    }
  assert (context->record_type == REC_HOME || context->record_type == REC_BIGONE
	  || context->record_type == REC_RELOCATION);
  assert (context->record_type == REC_HOME
	  || (!OID_ISNULL (&context->forward_oid) && context->fwd_page_watcher.pgptr != NULL));

  /* Decide whether a snapshot check applies to this class/scan-cache. */
  if (context->scan_cache != NULL && context->scan_cache->mvcc_snapshot != NULL
      && context->scan_cache->mvcc_snapshot->snapshot_fnc != NULL
      /* NOTE(review): the rest of this condition and the body statement are
       * missing from this extract (presumably an mvcc-disabled-class check
       * and mvcc_snapshot = context->scan_cache->mvcc_snapshot) — confirm
       * against the repository. */
    {
    }

  if (mvcc_snapshot != NULL || context->old_chn != NULL_CHN)
    {
      /* mvcc header is needed for visibility check or chn check */
      scan = heap_get_mvcc_header (thread_p, context, &mvcc_header);
      if (scan != S_SUCCESS)
	{
	  goto exit;
	}
    }

  if (mvcc_snapshot != NULL)
    {
      MVCC_SATISFIES_SNAPSHOT_RESULT snapshot_res;

      snapshot_res = mvcc_snapshot->snapshot_fnc (thread_p, &mvcc_header, mvcc_snapshot);
      if (snapshot_res == TOO_NEW_FOR_SNAPSHOT)
	{
	  /* current version is not visible, check previous versions from log and skip record get from heap */
	  scan =
	    heap_get_visible_version_from_log (thread_p, context->recdes_p, &MVCC_GET_PREV_VERSION_LSA (&mvcc_header),
					       context->scan_cache, context->old_chn);
	  goto exit;
	}
      else if (snapshot_res == TOO_OLD_FOR_SNAPSHOT)
	{
	  scan = S_SNAPSHOT_NOT_SATISFIED;
	  goto exit;
	}
      /* else...fall through to heap get */
    }

  if (MVCC_IS_CHN_UPTODATE (&mvcc_header, context->old_chn))
    {
      /* Object version didn't change and CHN is up-to-date. Don't get record data and return
       * S_SUCCESS_CHN_UPTODATE instead. */
      scan = S_SUCCESS_CHN_UPTODATE;
      goto exit;
    }

  if (context->recdes_p != NULL)
    {
      scan = heap_get_record_data_when_all_ready (thread_p, context);
    }

  /* Fall through to exit. */

exit:
  return scan;
}
24249 
24250 /*
24251  * heap_update_set_prev_version () - Set prev version lsa to record according to its type.
24252  *
24253  * return : error code or NO_ERROR
24254  * thread_p (in) : Thread entry.
24255  * oid (in) : Object identifier of the updated record
24256  * home_pg_watcher (in): Home page watcher; must be
24257  * fwd_pg_watcher (in) : Forward page watcher
24258  * prev_version_lsa(in): LSA address of undo log record of the old record
24259  *
24260  * Note: This function works only with heap_update_home/relocation/bigone functions. It is designed to set the
24261  * prev_version_lsa to updated records by overwriting this information directly into heap file. The header of the
24262  * record should be prepared for this in heap_insert_adjust_recdes_header().
24263  * The records are obtained using PEEK, and modified directly, without using spage_update afterwards!
24264  * Note: It is expected to have the home page fixed and also the forward page in case of relocation.
24265  */
24266 static int
24267 heap_update_set_prev_version (THREAD_ENTRY * thread_p, const OID * oid, PGBUF_WATCHER * home_pg_watcher,
24268  PGBUF_WATCHER * fwd_pg_watcher, LOG_LSA * prev_version_lsa)
24269 {
24270  int error_code = NO_ERROR;
24271  RECDES recdes, forward_recdes;
24272  VPID fwd_vpid;
24273  OID forward_oid;
24274  PGBUF_WATCHER overflow_pg_watcher;
24275 
24276  assert (oid != NULL && !OID_ISNULL (oid) && prev_version_lsa != NULL && !LSA_ISNULL (prev_version_lsa));
24277  assert (prev_version_lsa->pageid > 0 && prev_version_lsa->offset >= 0);
24278 
24279  /* the home page should be already fixed */
24280  assert (home_pg_watcher != NULL && home_pg_watcher->pgptr != NULL);
24281  if (spage_get_record (thread_p, home_pg_watcher->pgptr, oid->slotid, &recdes, PEEK) != S_SUCCESS)
24282  {
24283  ASSERT_ERROR_AND_SET (error_code);
24284  goto end;
24285  }
24286 
24287  if (recdes.type == REC_HOME)
24288  {
24289  error_code = or_mvcc_set_log_lsa_to_record (&recdes, prev_version_lsa);
24290  if (error_code != NO_ERROR)
24291  {
24292  assert (false);
24293  goto end;
24294  }
24295 
24296  pgbuf_set_dirty (thread_p, home_pg_watcher->pgptr, DONT_FREE);
24297  }
24298  else if (recdes.type == REC_RELOCATION)
24299  {
24300  forward_oid = *((OID *) recdes.data);
24301  VPID_GET_FROM_OID (&fwd_vpid, &forward_oid);
24302 
24303  /* the forward page should be already fixed */
24304  assert (fwd_pg_watcher != NULL && fwd_pg_watcher->pgptr != NULL);
24305  assert (VPID_EQ (&fwd_vpid, pgbuf_get_vpid_ptr (fwd_pg_watcher->pgptr)));
24306 
24307  if (spage_get_record (thread_p, fwd_pg_watcher->pgptr, forward_oid.slotid, &forward_recdes, PEEK) != S_SUCCESS)
24308  {
24309  ASSERT_ERROR_AND_SET (error_code);
24310  goto end;
24311  }
24312 
24313  error_code = or_mvcc_set_log_lsa_to_record (&forward_recdes, prev_version_lsa);
24314  if (error_code != NO_ERROR)
24315  {
24316  assert (false);
24317  goto end;
24318  }
24319 
24320  pgbuf_set_dirty (thread_p, fwd_pg_watcher->pgptr, DONT_FREE);
24321  }
24322  else if (recdes.type == REC_BIGONE)
24323  {
24324  forward_oid = *((OID *) recdes.data);
24325 
24326  VPID_GET_FROM_OID (&fwd_vpid, &forward_oid);
24328  PGBUF_WATCHER_COPY_GROUP (&overflow_pg_watcher, home_pg_watcher);
24329  if (pgbuf_ordered_fix (thread_p, &fwd_vpid, OLD_PAGE, PGBUF_LATCH_WRITE, &overflow_pg_watcher) != NO_ERROR)
24330  {
24331  ASSERT_ERROR_AND_SET (error_code);
24332  goto end;
24333  }
24334 
24335  forward_recdes.data = overflow_get_first_page_data (overflow_pg_watcher.pgptr);
24336  forward_recdes.length = OR_HEADER_SIZE (forward_recdes.data);
24337 
24338  error_code = or_mvcc_set_log_lsa_to_record (&forward_recdes, prev_version_lsa);
24339 
24340  /* unfix overflow page; it is used only locally */
24341  pgbuf_set_dirty (thread_p, overflow_pg_watcher.pgptr, DONT_FREE);
24342  pgbuf_ordered_unfix (thread_p, &overflow_pg_watcher);
24343 
24344  if (error_code != NO_ERROR)
24345  {
24346  assert (false);
24347  goto end;
24348  }
24349  }
24350  else
24351  {
24352  /* Unexpected record type. */
24353  assert (false);
24354  error_code = ER_FAILED;
24355  }
24356 
24357 end:
24358  return error_code;
24359 }
24360 
24361 /*
24362  * heap_get_last_version () - Generic function for retrieving last version of heap objects (not considering visibility)
24363  *
24364  * return : Scan code.
24365  * thread_p (in) : Thread entry.
24366  * context (in) : Heap get context
24367  *
24368  * NOTE: Caller must handle the cleanup of context
24369  */
24370 SCAN_CODE
24372 {
24373  SCAN_CODE scan = S_SUCCESS;
24375 
24376  assert (context->scan_cache != NULL);
24377  assert (context->recdes_p != NULL);
24378 
24379  if (context->scan_cache && context->ispeeking == COPY)
24380  {
24381  /* Allocate an area to hold the object. Assume that the object will fit in two pages for not better estimates. */
24382  if (heap_scan_cache_allocate_area (thread_p, context->scan_cache, DB_PAGESIZE * 2) != NO_ERROR)
24383  {
24384  return S_ERROR;
24385  }
24386  }
24387 
24388  scan = heap_prepare_get_context (thread_p, context, false, LOG_WARNING_IF_DELETED);
24389  if (scan != S_SUCCESS)
24390  {
24391  goto exit;
24392  }
24393  assert (context->record_type == REC_HOME || context->record_type == REC_BIGONE
24394  || context->record_type == REC_RELOCATION);
24395  assert (context->record_type == REC_HOME
24396  || (!OID_ISNULL (&context->forward_oid) && context->fwd_page_watcher.pgptr != NULL));
24397 
24398  scan = heap_get_mvcc_header (thread_p, context, &mvcc_header);
24399  if (scan != S_SUCCESS)
24400  {
24401  goto exit;
24402  }
24403 
24404  if (MVCC_IS_CHN_UPTODATE (&mvcc_header, context->old_chn))
24405  {
24406  /* Object version didn't change and CHN is up-to-date. Don't get record data and return
24407  * S_SUCCESS_CHN_UPTODATE instead. */
24408  scan = S_SUCCESS_CHN_UPTODATE;
24409  goto exit;
24410  }
24411 
24412  if (context->recdes_p != NULL)
24413  {
24414  scan = heap_get_record_data_when_all_ready (thread_p, context);
24415  }
24416 
24417  /* Fall through to exit. */
24418 
24419 exit:
24420 
24421  return scan;
24422 }
24423 
24424 /*
24425  * heap_prepare_object_page () - Check if provided page matches the page of provided OID or fix the right one.
24426  *
24427  * return : Error code.
24428  * thread_p (in) : Thread entry.
24429  * oid (in) : Object identifier.
24430  * page_watcher_p(out) : Page watcher used for page fix.
24431  * latch_mode (in) : Latch mode.
24432  */
24433 int
24434 heap_prepare_object_page (THREAD_ENTRY * thread_p, const OID * oid, PGBUF_WATCHER * page_watcher_p,
24435  PGBUF_LATCH_MODE latch_mode)
24436 {
24437  VPID object_vpid;
24438  int ret = NO_ERROR;
24439 
24440  assert (oid != NULL && !OID_ISNULL (oid));
24441 
24442  VPID_GET_FROM_OID (&object_vpid, oid);
24443 
24444  if (page_watcher_p->pgptr != NULL && !VPID_EQ (pgbuf_get_vpid_ptr (page_watcher_p->pgptr), &object_vpid))
24445  {
24446  /* unfix provided page if it does not correspond to the VPID */
24447  pgbuf_ordered_unfix (thread_p, page_watcher_p);
24448  }
24449 
24450  if (page_watcher_p->pgptr == NULL)
24451  {
24452  /* fix required page */
24453  ret = pgbuf_ordered_fix (thread_p, &object_vpid, OLD_PAGE, latch_mode, page_watcher_p);
24454  if (ret != NO_ERROR)
24455  {
24456  if (ret == ER_PB_BAD_PAGEID)
24457  {
24458  /* maybe this error could be removed */
24460  oid->slotid);
24461  ret = ER_HEAP_UNKNOWN_OBJECT;
24462  }
24463 
24464  if (ret == ER_LK_PAGE_TIMEOUT && er_errid () == NO_ERROR)
24465  {
24467  ret = ER_PAGE_LATCH_ABORTED;
24468  }
24469  }
24470  }
24471 
24472  return ret;
24473 }
24474 
24475 /*
24476  * heap_clean_get_context () - Unfix page watchers of get context and save home page to scan_cache if possible
24477  *
24478  * thread_p (in) : Thread_identifier.
24479  * context (in) : Heap get context.
24480  */
24481 void
24483 {
24484  assert (context != NULL);
24485 
24486  if (context->scan_cache != NULL && context->scan_cache->cache_last_fix_page
24487  && context->home_page_watcher.pgptr != NULL)
24488  {
24489  /* Save home page (or NULL if it had to be unfixed) to scan_cache. */
24490  pgbuf_replace_watcher (thread_p, &context->home_page_watcher, &context->scan_cache->page_watcher);
24491  assert (context->home_page_watcher.pgptr == NULL);
24492  }
24493 
24494  if (context->home_page_watcher.pgptr)
24495  {
24496  /* Unfix home page. */
24497  pgbuf_ordered_unfix (thread_p, &context->home_page_watcher);
24498  }
24499 
24500  if (context->fwd_page_watcher.pgptr != NULL)
24501  {
24502  /* Unfix forward page. */
24503  pgbuf_ordered_unfix (thread_p, &context->fwd_page_watcher);
24504  }
24505 
24506  assert (context->home_page_watcher.pgptr == NULL && context->fwd_page_watcher.pgptr == NULL);
24507 }
24508 
24509 /*
24510  * heap_init_get_context () - Initiate all heap get context fields with generic informations
24511  *
24512  * thread_p (in) : Thread_identifier.
24513  * context (out) : Heap get context.
24514  * oid (in) : Object identifier.
24515  * class_oid (in) : Class oid.
24516  * recdes (in) : Record descriptor.
24517  * scan_cache (in) : Scan cache.
24518  * is_peeking (in) : PEEK or COPY.
24519  * old_chn (in) : Cache coherency number.
24520 */
24521 void
24522 heap_init_get_context (THREAD_ENTRY * thread_p, HEAP_GET_CONTEXT * context, const OID * oid, OID * class_oid,
24523  RECDES * recdes, HEAP_SCANCACHE * scan_cache, int ispeeking, int old_chn)
24524 {
24525  context->oid_p = oid;
24526  context->class_oid_p = class_oid;
24527  OID_SET_NULL (&context->forward_oid);
24528  context->recdes_p = recdes;
24529 
24530  if (scan_cache != NULL && !HFID_IS_NULL (&scan_cache->node.hfid))
24531  {
24534  }
24535  else
24536  {
24539  }
24540 
24541  if (scan_cache != NULL && scan_cache->cache_last_fix_page && scan_cache->page_watcher.pgptr != NULL)
24542  {
24543  /* switch to local page watcher */
24544  pgbuf_replace_watcher (thread_p, &scan_cache->page_watcher, &context->home_page_watcher);
24545  }
24546 
24547  context->scan_cache = scan_cache;
24548  context->ispeeking = ispeeking;
24549  context->old_chn = old_chn;
24550  if (scan_cache != NULL && scan_cache->page_latch == X_LOCK)
24551  {
24552  context->latch_mode = PGBUF_LATCH_WRITE;
24553  }
24554  else
24555  {
24556  context->latch_mode = PGBUF_LATCH_READ;
24557  }
24558 }
24559 
24560 /*
24561  * heap_scan_cache_allocate_area () - Allocate scan_cache area
24562  *
24563  * return: error code
24564  * thread_p (in) : Thread entry.
24565  * scan_cache_p (in) : Scan cache.
24566  * size (in) : Required size of recdes data.
24567  */
int
{
  assert (scan_cache_p != NULL && size > 0);
  if (scan_cache_p->area == NULL)
    {
      /* No buffer cached yet: allocate one large enough for the requested size.
       * Assume that the object will fit in two pages for lack of better estimates. */
      scan_cache_p->area = (char *) db_private_alloc (thread_p, size);
      if (scan_cache_p->area == NULL)
	{
	  /* allocation failed; mark the cached size invalid so later calls re-allocate */
	  scan_cache_p->area_size = -1;
	  return ER_OUT_OF_VIRTUAL_MEMORY;
	}
      scan_cache_p->area_size = size;
    }
  else if (scan_cache_p->area_size < size)
    {
      /* Cached buffer exists but is too small: grow it to the requested size.
       * NOTE(review): the old pointer is overwritten before checking the result; if
       * db_private_realloc returns NULL on failure (like realloc), the previous buffer
       * is leaked -- confirm db_private_realloc semantics and consider a temporary. */
      scan_cache_p->area = (char *) db_private_realloc (thread_p, scan_cache_p->area, size);
      if (scan_cache_p->area == NULL)
	{
	  /* reallocation failed; invalidate the cached size */
	  scan_cache_p->area_size = -1;
	  return ER_OUT_OF_VIRTUAL_MEMORY;
	}
      scan_cache_p->area_size = size;
    }
  /* else: existing buffer is already big enough; reuse it as-is */

  return NO_ERROR;
}
24598 
24599 /*
24600  * heap_scan_cache_allocate_recdes_data () - Allocate recdes data and set it to recdes
24601  *
24602  * return: error code
24603  * thread_p (in) : Thread entry.
24604  * scan_cache_p (in) : Scan cache.
24605  * recdes_p (in) : Record descriptor.
24606  * size (in) : Required size of recdes data.
24607  */
24608 static int
24609 heap_scan_cache_allocate_recdes_data (THREAD_ENTRY * thread_p, HEAP_SCANCACHE * scan_cache_p, RECDES * recdes_p,
24610  int size)
24611 {
24612  int error_code;
24613  assert (scan_cache_p != NULL && recdes_p != NULL);
24614 
24615  error_code = heap_scan_cache_allocate_area (thread_p, scan_cache_p, size);
24616  if (error_code != NO_ERROR)
24617  {
24618  return error_code;
24619  }
24620 
24621  recdes_p->data = scan_cache_p->area;
24622  recdes_p->area_size = scan_cache_p->area_size;
24623 
24624  return NO_ERROR;
24625 }
24626 
24627 /*
24628  * heap_get_class_record () - Retrieves class objects only
24629  *
24630  * return SCAN_CODE: S_SUCCESS or error
24631  * thread_p (in) : Thread entry.
24632  * class_oid (in) : Class object identifier.
24633  * recdes_p (out) : Record descriptor.
24634  * scan_cache (in) : Scan cache.
24635  * ispeeking (in) : PEEK or COPY
24636  */
24637 SCAN_CODE
24638 heap_get_class_record (THREAD_ENTRY * thread_p, const OID * class_oid, RECDES * recdes_p, HEAP_SCANCACHE * scan_cache,
24639  int ispeeking)
24640 {
24641  HEAP_GET_CONTEXT context;
24642  OID root_oid = *oid_Root_class_oid;
24643  SCAN_CODE scan;
24644 
24645 #if !defined(NDEBUG)
24646  /* for debugging set root_oid NULL and check afterwards if it really is root oid */
24647  OID_SET_NULL (&root_oid);
24648 #endif /* !NDEBUG */
24649  heap_init_get_context (thread_p, &context, class_oid, &root_oid, recdes_p, scan_cache, ispeeking, NULL_CHN);
24650 
24651  scan = heap_get_last_version (thread_p, &context);
24652 
24653  heap_clean_get_context (thread_p, &context);
24654 
24655 #if !defined(NDEBUG)
24656  assert (OID_ISNULL (&root_oid) || OID_IS_ROOTOID (&root_oid));
24657 #endif /* !NDEBUG */
24658 
24659  return scan;
24660 }
24661 
24662 /*
24663  * heap_rv_undo_ovf_update - Assure undo record corresponds with vacuum status
24664  *
24665  * return : int
24666  * thread_p (in): Thread entry.
24667  * rcv (in) : Recovery structure.
24668  */
int
{
  int error_code;

  /* re-check the page vacuum status at undo; NULL_SLOTID + REC_BIGONE identify the
   * undone record as an overflow (big) record */
  error_code = vacuum_rv_check_at_undo (thread_p, rcv->pgptr, NULL_SLOTID, REC_BIGONE);

  /* always mark the recovered page dirty; DONT_FREE leaves the page fixed for the caller */
  pgbuf_set_dirty (thread_p, rcv->pgptr, DONT_FREE);

  return error_code;
}
24680 
24681 /*
24682  * heap_get_best_space_num_stats_entries - Returns the number of num_stats_entries
24683  * return : the number of entries in the heap
24684  *
24685  */
int
{
  /* direct read of the global best-space cache counter; no lock is taken here,
   * so the returned value is only a point-in-time snapshot */
  return heap_Bestspace->num_stats_entries;
}
24691 
24692 /*
24693  * heap_get_hfid_from_vfid () - Get hfid for file. Caller must be sure this file belong to a heap.
24694  *
24695  * return : error code
24696  * thread_p (in) : thread entry
24697  * vfid (in) : file identifier
24698  * hfid (out) : heap identifier
24699  */
int
{
  VPID vpid_header;
  int error_code = NO_ERROR;

  /* the heap id shares the file's vfid; only the header page id remains to be found */
  hfid->vfid = *vfid;
  error_code = heap_get_header_page (thread_p, hfid, &vpid_header);
  if (error_code != NO_ERROR)
    {
      ASSERT_ERROR ();
      /* invalidate the output so callers cannot use a half-initialized hfid */
      VFID_SET_NULL (&hfid->vfid);
      return error_code;
    }
  /* sanity check: the header page is expected on the same volume as the file */
  assert (hfid->vfid.volid == vpid_header.volid);
  hfid->hpgid = vpid_header.pageid;
  return NO_ERROR;
}
24718 
24719 /*
24720  * heap_is_page_header () - return true if page is a heap header page. must be heap page though!
24721  *
24722  * return : true if file header page, false otherwise.
24723  * thread_p (in) : thread entry
24724  * page (in) : heap page
24725  */
bool
{
  SPAGE_SLOT *slotp;

  /* todo: why not set a different page ptype. */

  /* caller must pass a fixed heap page; this function only distinguishes
   * header pages from ordinary heap data pages */
  assert (page != NULL && pgbuf_get_page_ptype (thread_p, page) == PAGE_HEAP);

  spage_header = (SPAGE_HEADER *) page;
  if (spage_header->num_records <= 0)
    {
      /* an empty page cannot hold the heap header record */
      return false;
    }
  if (slotp == NULL)
    {
      return false;
    }
  /* identify the header page by the exact length of its first record; presumably
   * only a header page stores a record of sizeof (HEAP_HDR_STATS) there (see todo
   * above about using a dedicated page ptype instead) */
  if (slotp->record_length == sizeof (HEAP_HDR_STATS))
    {
      return true;
    }
  return false;
}
SCAN_CODE heap_get_record_data_when_all_ready(THREAD_ENTRY *thread_p, HEAP_GET_CONTEXT *context)
Definition: heap_file.c:7596
PGLENGTH offset
Definition: recovery.h:194
void stx_free_xasl_unpack_info(void *xasl_unpack_info)
HEAP_CLASSREPR_LOCK * lock_next
Definition: heap_file.c:333
INT64 offset
#define HEAP_PAGE_SET_VACUUM_STATUS(chain, status)
Definition: heap_file.c:232
DISK_ISVALID not_vacuumed_res
Definition: heap_file.c:285
OR_ATTRIBUTE * last_attrepr
Definition: heap_attrinfo.h:56
char * PAGE_PTR
MIN_MAX_COLUMN_INFO min_max_val
Definition: dbtype_def.h:892
#define OID_INITIALIZER
Definition: oid.h:37
#define OR_SET_VAR_OFFSET_SIZE(val, offset_size)
DB_C_FLOAT db_get_float(const DB_VALUE *value)
int char_isspace(int c)
Definition: chartype.c:110
int heap_vpid_prev(THREAD_ENTRY *thread_p, const HFID *hfid, PAGE_PTR pgptr, VPID *prev_vpid)
Definition: heap_file.c:4913
#define INLINE
Definition: porting.h:56
PAGE_PTR pgbuf_fix_debug(THREAD_ENTRY *thread_p, const VPID *vpid, PAGE_FETCH_MODE fetch_mode, PGBUF_LATCH_MODE request_mode, PGBUF_LATCH_CONDITION condition, const char *caller_file, int caller_line)
Definition: page_buffer.c:1774
#define HEAP_PERF_TRACK_PREPARE(thread_p, context)
Definition: heap_file.c:503
#define ER_PAGE_LATCH_ABORTED
Definition: error_code.h:1070
#define CLASSREPR_HASH_SIZE
Definition: heap_file.c:390
DB_VALUE * heap_attrinfo_access(ATTR_ID attrid, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10559
#define PGBUF_PAGE_VPID_AS_ARGS(pg)
Definition: page_buffer.h:51
SCAN_CODE heap_capacity_next_scan(THREAD_ENTRY *thread_p, int cursor, DB_VALUE **out_values, int out_cnt, void *ptr)
Definition: heap_file.c:17860
SCAN_CODE heap_scanrange_next(THREAD_ENTRY *thread_p, OID *next_oid, RECDES *recdes, HEAP_SCANRANGE *scan_range, int ispeeking)
Definition: heap_file.c:8373
void heap_scancache_end_modify(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:7112
OR_ATTRIBUTE * shared_attrs
bool tp_domain_references_objects(const TP_DOMAIN *dom)
int tp_domain_disk_size(TP_DOMAIN *domain)
#define PGBUF_WATCHER_COPY_GROUP(w_dst, w_src)
Definition: page_buffer.h:99
DB_VALUE * heap_attrvalue_get_key(THREAD_ENTRY *thread_p, int btid_index, HEAP_CACHE_ATTRINFO *idx_attrinfo, RECDES *recdes, BTID *btid, DB_VALUE *db_value, char *buf, FUNC_PRED_UNPACK_INFO *func_indx_pred, TP_DOMAIN **key_domain)
Definition: heap_file.c:12652
#define OR_MVCC_FLAG_VALID_DELID
int heap_object_upgrade_domain(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *upd_scancache, HEAP_CACHE_ATTRINFO *attr_info, OID *oid, const ATTR_ID att_id)
Definition: heap_file.c:16865
#define vacuum_er_log_error(er_log_level, msg,...)
Definition: vacuum.h:67
int heap_rv_mark_deleted_on_postpone(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:5749
int qexec_clear_func_pred(THREAD_ENTRY *thread_p, FUNC_PRED *func_pred)
#define realloc(p, n)
Definition: leak_detector.h:6
#define pgbuf_attach_watcher(...)
Definition: page_buffer.h:382
int xheap_get_class_num_objects_pages(THREAD_ENTRY *thread_p, const HFID *hfid, int approximation, int *nobjs, int *npages)
Definition: heap_file.c:16234
cubthread::entry * thread_get_thread_entry_info(void)
int db_value_coerce(const DB_VALUE *src, DB_VALUE *dest, const DB_DOMAIN *desired_domain)
Definition: db_macro.c:1774
#define NO_ERROR
Definition: error_code.h:42
int heap_header_capacity_start_scan(THREAD_ENTRY *thread_p, int show_type, DB_VALUE **arg_values, int arg_cnt, void **ptr)
Definition: heap_file.c:17507
int(* setval)(DB_VALUE *dest, const DB_VALUE *src, bool copy)
int mht_map_no_key(THREAD_ENTRY *thread_p, const MHT_TABLE *ht, int(*map_func)(THREAD_ENTRY *thread_p, void *data, void *args), void *func_args)
Definition: memory_hash.c:1988
int j
Definition: mprec.c:540
n
Definition: dtoa.c:48
int area_size
void log_append_redo_data(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int length, const void *data)
Definition: log_manager.c:2009
#define LOG_IS_MVCC_HEAP_OPERATION(rcvindex)
Definition: log_impl.h:1477
int or_rep_id(RECDES *record)
#define strdup(s)
Definition: leak_detector.h:8
#define __attribute__(X)
Definition: porting.h:37
int heap_get_hfid_from_class_oid(THREAD_ENTRY *thread_p, const OID *class_oid, HFID *hfid)
Definition: heap_file.c:16563
#define MVCC_IS_HEADER_DELID_VALID(rec_header_p)
Definition: mvcc.h:69
DB_COLLECTION * db_get_set(const DB_VALUE *value)
void er_stack_push(void)
int heap_get_class_oid_from_page(THREAD_ENTRY *thread_p, PAGE_PTR page_p, OID *class_oid)
Definition: heap_file.c:18786
#define MVCC_GET_INSID(header)
Definition: mvcc.h:33
HEAP_CLASSREPR_HASH * hash_table
Definition: heap_file.c:358
#define LANG_SYS_COLLATION
int pr_data_writeval_disk_size(DB_VALUE *value)
MVCC_SNAPSHOT * logtb_get_mvcc_snapshot(THREAD_ENTRY *thread_p)
#define MVCC_IS_CHN_UPTODATE(rec_header_p, chn)
Definition: mvcc.h:115
#define pgbuf_ordered_fix(thread_p, req_vpid, fetch_mode, requestmode, req_watcher)
Definition: page_buffer.h:252
#define VACUUM_ER_LOG_RECOVERY
Definition: vacuum.h:53
#define LF_EM_NOT_USING_MUTEX
Definition: lock_free.h:60
void log_append_undoredo_data2(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, const VFID *vfid, PAGE_PTR pgptr, PGLENGTH offset, int undo_length, int redo_length, const void *undo_data, const void *redo_data)
Definition: log_manager.c:1891
TP_DOMAIN_STATUS tp_domain_check(const TP_DOMAIN *domain, const DB_VALUE *value, TP_MATCH exact_match)
int xheap_has_instance(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, int has_visible_instance)
Definition: heap_file.c:16267
#define IO_PAGESIZE
SCAN_CODE heap_last(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking)
Definition: heap_file.c:8016
OR_CLASSREP * read_classrepr
Definition: heap_attrinfo.h:70
int db_value_domain_min(DB_VALUE *value, const DB_TYPE type, const int precision, const int scale, const int codeset, const int collation_id, const DB_ENUMERATION *enumeration)
Definition: db_macro.c:409
int vacuum_heap_page(THREAD_ENTRY *thread_p, VACUUM_HEAP_OBJECT *heap_objects, int n_heap_objects, MVCCID threshold_mvccid, HFID *hfid, bool *reusable, bool was_interrupted)
Definition: vacuum.c:1266
HEAP_SCANCACHE * scan_cache_p
Definition: heap_file.h:259
#define HEAP_RV_FLAG_VACUUM_STATUS_CHANGE
Definition: heap_file.c:499
void log_append_undoredo_crumbs(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int num_undo_crumbs, int num_redo_crumbs, const LOG_CRUMB *undo_crumbs, const LOG_CRUMB *redo_crumbs)
Definition: log_manager.c:2060
OR_FUNCTION_INDEX * func_index_info
#define ASSERT_ERROR()
SCAN_CODE
#define OID_GT(oidp1, oidp2)
Definition: oid.h:98
SCAN_CODE heap_scanrange_to_prior(THREAD_ENTRY *thread_p, HEAP_SCANRANGE *scan_range, OID *last_oid)
Definition: heap_file.c:8267
VACUUM_HEAP_OBJECT * heap_objects
Definition: vacuum.h:131
STATIC_INLINE HEAP_CHAIN * heap_get_chain_ptr(THREAD_ENTRY *thread_p, PAGE_PTR page_heap) __attribute__((ALWAYS_INLINE))
Definition: heap_file.c:4012
#define HEAP_MAYNEED_DECACHE_GUESSED_LASTREPRS(class_oid, hfid)
Definition: heap_file.c:393
int file_descriptor_update(THREAD_ENTRY *thread_p, const VFID *vfid, void *des_new)
unsigned int of_local_next
Definition: lock_free.h:67
HEAP_BESTSPACE best[HEAP_NUM_BEST_SPACESTATS]
Definition: heap_file.c:208
void lf_hash_destroy(LF_HASH_TABLE *table)
Definition: lock_free.c:1935
void logpb_fatal_error(THREAD_ENTRY *thread_p, bool logexit, const char *file_name, const int lineno, const char *fmt,...)
int heap_rv_redo_reuse_page(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16121
int
Definition: mprec.c:252
#define HFID_INITIALIZER
int file_dealloc(THREAD_ENTRY *thread_p, const VFID *vfid, const VPID *vpid, FILE_TYPE file_type_hint)
for(p=libs; *p;p++)
Definition: dynamic_load.c:969
#define QSTR_IS_ANY_CHAR(s)
Definition: string_opfunc.h:47
unsigned char codeset
Definition: object_domain.h:93
bool is_redistribute_insert_with_delid
Definition: heap_file.h:286
#define malloc(n)
Definition: leak_detector.h:3
int heap_scancache_start_modify(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, const HFID *hfid, const OID *class_oid, int op_type, MVCC_SNAPSHOT *mvcc_snapshot)
Definition: heap_file.c:6723
bool cache_last_fix_page
Definition: heap_file.h:139
bool schema_change
Definition: heap_file.c:449
int heap_scancache_quick_start_modify_with_class_oid(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, OID *class_oid)
Definition: heap_file.c:19288
LF_ENTRY_INITIALIZE_FUNC f_init
Definition: lock_free.h:91
SCAN_CODE heap_get_visible_version(THREAD_ENTRY *thread_p, const OID *oid, OID *class_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking, int old_chn)
Definition: heap_file.c:24097
int heap_attrinfo_delete_lob(THREAD_ENTRY *thread_p, RECDES *recdes, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10358
#define TP_IS_SET_TYPE(typenum)
#define ER_TF_BUFFER_OVERFLOW
Definition: error_code.h:384
k1
Definition: mprec.c:488
#define OR_INT_SIZE
void set_free(DB_COLLECTION *set)
Definition: set_object.c:2551
int spage_insert(THREAD_ENTRY *thread_p, PAGE_PTR page_p, RECDES *record_descriptor_p, PGSLOTID *out_slot_id_p)
#define VPID_COPY(dest_ptr, src_ptr)
Definition: dbtype_def.h:934
PAGEID pgbuf_get_page_id(PAGE_PTR pgptr)
Definition: page_buffer.c:4592
int db_make_bigint(DB_VALUE *value, const DB_BIGINT num)
int db_get_int(const DB_VALUE *value)
int heap_prepare_object_page(THREAD_ENTRY *thread_p, const OID *oid, PGBUF_WATCHER *page_watcher_p, PGBUF_LATCH_MODE latch_mode)
Definition: heap_file.c:24434
int heap_manager_finalize(void)
Definition: heap_file.c:4996
void heap_finalize_hfid_table(void)
Definition: heap_file.c:23123
SCAN_CODE heap_prepare_get_context(THREAD_ENTRY *thread_p, HEAP_GET_CONTEXT *context, bool is_heap_scan, NON_EXISTENT_HANDLING non_ex_handling_type)
Definition: heap_file.c:7267
#define ER_HEAP_NODATA_NEWADDRESS
Definition: error_code.h:103
FILE_TYPE file_type
Definition: heap_file.h:145
int logtb_get_number_of_total_tran_indices(void)
DB_TYPE
Definition: dbtype_def.h:695
LOG_RCVINDEX
Definition: recovery.h:36
LOG_LSA * log_get_append_lsa(void)
Definition: log_manager.c:604
LOG_LSA * logtb_find_current_tran_lsa(THREAD_ENTRY *thread_p)
DB_C_DOUBLE db_get_double(const DB_VALUE *value)
#define ER_FAILED
Definition: error_code.h:43
#define OR_MVCC_DELETE_ID_SIZE
PGBUF_WATCHER overflow_page_watcher
Definition: heap_file.h:273
void spage_initialize(THREAD_ENTRY *thread_p, PAGE_PTR page_p, INT16 slot_type, unsigned short alignment, bool is_saving)
OID * oid_Root_class_oid
Definition: oid.c:73
OR_ATTRIBUTE * class_attrs
#define OR_SHORT_SIZE
bool mvcc_is_mvcc_disabled_class(const OID *class_oid)
Definition: mvcc.c:720
pthread_mutex_t hash_mutex
Definition: heap_file.c:330
int file_get_type(THREAD_ENTRY *thread_p, const VFID *vfid, FILE_TYPE *ftype_out)
#define csect_enter(a, b, c)
Definition: cnv.c:138
TRAN_ABORT_REASON tran_abort_reason
Definition: log_impl.h:1753
#define DEFAULT_REPR_INCREMENT
Definition: heap_file.c:291
DB_COLLECTION * set_copy(DB_COLLECTION *set)
Definition: set_object.c:2464
#define OR_MVCC_MAX_HEADER_SIZE
int file_tracker_reuse_heap(THREAD_ENTRY *thread_p, const OID *class_oid, HFID *hfid_out)
DISK_ISVALID file_check_vpid(THREAD_ENTRY *thread_p, const VFID *vfid, const VPID *vpid_lookup)
BTREE_TYPE
SCAN_CODE spage_previous_record_dont_skip_empty(PAGE_PTR page_p, PGSLOTID *out_slot_id_p, RECDES *record_descriptor_p, int is_peeking)
int mht_rem(MHT_TABLE *ht, const void *key, int(*rem_func)(const void *key, void *data, void *args), void *func_args)
Definition: memory_hash.c:1709
LF_ENTRY_UNINITIALIZE_FUNC f_uninit
Definition: lock_free.h:94
FILE_TYPE
Definition: file_manager.h:38
LOG_HDRPAGE hdr
Definition: log_impl.h:145
#define HEAP_BIT_SET(byte_ptr, bit_num)
Definition: heap_file.c:423
REPR_ID heap_get_class_repr_id(THREAD_ENTRY *thread_p, OID *class_oid)
Definition: heap_file.c:16314
struct heap_hdr_stats::@210 estimates
int lock_has_lock_on_object(const OID *oid, const OID *class_oid, int tran_index, LOCK lock)
int db_make_varchar(DB_VALUE *value, const int max_char_length, const DB_C_CHAR str, const int char_str_byte_size, const int codeset, const int collation_id)
BTREE_UNIQUE_STATS * index_stat_info
Definition: heap_file.h:144
TP_DOMAIN_STATUS tp_value_auto_cast(const DB_VALUE *src, DB_VALUE *dest, const TP_DOMAIN *desired_domain)
bool recently_accessed
Definition: heap_file.c:435
int heap_rv_undo_update(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16037
#define HEAP_MAX_FIRSTSLOTID_LENGTH
MVCCID max_mvccid
Definition: heap_file.c:264
#define SAFEGUARD_RVSPACE
Definition: slotted_page.h:54
#define ER_CT_UNKNOWN_REPRID
Definition: error_code.h:489
HEAP_STATS_ENTRY * free_list
Definition: heap_file.c:465
void heap_rv_dump_chain(FILE *fp, int ignore_length, void *data)
Definition: heap_file.c:15401
const void * mht_put(MHT_TABLE *ht, const void *key, void *data)
Definition: memory_hash.c:1535
PGBUF_WATCHER home_page_watcher
Definition: heap_file.h:346
#define ASSERT_ERROR_AND_SET(error_code)
int(* index_writeval)(OR_BUF *buf, DB_VALUE *value)
SPAGE_SLOT * spage_get_slot(PAGE_PTR page_p, PGSLOTID slot_id)
Definition: lock_free.h:64
LF_ENTRY_FREE_FUNC f_free
Definition: lock_free.h:88
#define assert_release(e)
Definition: error_manager.h:97
HEAP_CLASSREPR_ENTRY * area
Definition: heap_file.c:356
void pgbuf_set_dirty(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, bool free_page)
Definition: page_buffer.c:4322
Definition: lock_free.h:122
char * overflow_get_first_page_data(char *page_ptr)
SCAN_CODE overflow_get(THREAD_ENTRY *thread_p, const VPID *ovf_vpid, RECDES *recdes, MVCC_SNAPSHOT *mvcc_snapshot)
int idx
Definition: heap_file.c:433
void spage_update_record_type(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id, INT16 record_type)
Definition: heap_file.c:431
#define OR_OFFSET_SIZE_2BYTE
int pr_midxkey_init_boundbits(char *bufptr, int n_atts)
PGBUF_WATCHER * overflow_page_watcher_p
Definition: heap_file.h:279
void heap_page_set_vacuum_status_none(THREAD_ENTRY *thread_p, PAGE_PTR heap_page)
Definition: heap_file.c:23558
int lock_object(THREAD_ENTRY *thread_p, const OID *oid, const OID *class_oid, LOCK lock, int cond_flag)
#define ER_CSS_PTHREAD_MUTEX_LOCK
Definition: error_code.h:995
#define MVCCID_NULL
void log_sysop_start(THREAD_ENTRY *thread_p)
Definition: log_manager.c:3606
int length
Definition: log_impl.h:1808
#define OR_GET_BOUND_BIT_FLAG(ptr)
INT16 heap_rv_remove_flags_from_offset(INT16 offset)
Definition: heap_file.c:23704
int or_mvcc_get_header(RECDES *record, MVCC_REC_HEADER *mvcc_rec_header)
int num_substitutions
Definition: heap_file.c:197
int heap_scancache_quick_start_root_hfid(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:19210
int heap_dump_capacity(THREAD_ENTRY *thread_p, FILE *fp, const HFID *hfid)
Definition: heap_file.c:14132
#define OR_MVCCID_SIZE
int reserve2_for_future
Definition: heap_file.c:216
#define OR_MVCC_PREV_VERSION_LSA_OFFSET(mvcc_flags)
FILE_TYPE ftype
Definition: heap_file.h:181
#define LOG_SET_DATA_ADDR(data_addr, page, vol_file_id, off)
Definition: log_impl.h:1016
DISK_ISVALID heap_check_all_pages(THREAD_ENTRY *thread_p, HFID *hfid)
Definition: heap_file.c:13625
struct timeval TSCTIMEVAL
Definition: tsc_timer.h:41
#define OID_SET_NULL(oidp)
Definition: oid.h:86
int or_mvcc_add_header(RECDES *record, MVCC_REC_HEADER *mvcc_rec_header, int bound_bit, int variable_offset_size)
XML_StartCdataSectionHandler start
Definition: expat.h:539
#define OID_LT(oidp1, oidp2)
Definition: oid.h:114
#define OR_GET_BOUND_BITS(obj, nvars, fsize)
#define NULL_SLOTID
#define OR_VAR_ELEMENT_PTR(obj, index)
HEAP_CLASSREPR_ENTRY * free_top
Definition: heap_file.c:348
void thread_suspend_wakeup_and_unlock_entry(cubthread::entry *thread_p, thread_resume_suspend_status suspended_reason)
char * data
void heap_rv_dump_statistics(FILE *fp, int ignore_length, void *data)
Definition: heap_file.c:15384
int heap_get_best_space_num_stats_entries(void)
Definition: heap_file.c:24687
void tsc_elapsed_time_usec(TSCTIMEVAL *tv, TSC_TICKS end_tick, TSC_TICKS start_tick)
Definition: tsc_timer.c:102
int db_elo_copy(DB_ELO *src, DB_ELO *dest)
Definition: db_elo.c:102
MVCCID mvcc_id
Definition: recovery.h:191
struct spage_slot SPAGE_SLOT
Definition: slotted_page.h:85
TP_DOMAIN * tp_domain_copy(const TP_DOMAIN *domain, bool check_cache)
int lock_scan(THREAD_ENTRY *thread_p, const OID *class_oid, int cond_flag, LOCK class_lock)
PGNSLOTS num_records
Definition: slotted_page.h:65
int32_t pageid
Definition: dbtype_def.h:904
void or_free_classrep(OR_CLASSREP *rep)
#define lf_tran_end_with_mb(entry)
Definition: lock_free.h:245
HEAP_ATTRVALUE_STATE state
Definition: heap_attrinfo.h:52
INT32 root_pageid
TP_DOMAIN * tp_domain_find_charbit(DB_TYPE type, int codeset, int collation_id, unsigned char collation_flag, int precision, bool is_desc)
#define LOG_CS_ENTER(thread_p)
Definition: log_impl.h:810
void pgbuf_ordered_set_dirty_and_free(THREAD_ENTRY *thread_p, PGBUF_WATCHER *pg_watcher)
INT32 hpgid
#define MVCC_CLEAR_FLAG_BITS(rec_header_p, flag)
Definition: mvcc.h:83
#define BTID_IS_EQUAL(b1, b2)
PAGE_PTR pgbuf_flush_with_wal(THREAD_ENTRY *thread_p, PAGE_PTR pgptr)
Definition: page_buffer.c:2941
int xheap_destroy_newly_created(THREAD_ENTRY *thread_p, const HFID *hfid, const OID *class_oid)
Definition: heap_file.c:5690
int er_errid(void)
int max_reprid
Definition: heap_file.c:315
int heap_rv_mark_deleted_on_undo(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:5731
SCAN_CODE heap_get_last_version(THREAD_ENTRY *thread_p, HEAP_GET_CONTEXT *context)
Definition: heap_file.c:24371
int file_get_sticky_first_page(THREAD_ENTRY *thread_p, const VFID *vfid, VPID *vpid_out)
#define MVCC_SET_DELID(header, mvcc_id)
Definition: mvcc.h:42
LF_FREELIST hfid_hash_freelist
Definition: heap_file.h:166
#define SP_SUCCESS
Definition: slotted_page.h:51
#define VPID_INITIALIZER
Definition: dbtype_def.h:919
#define OR_BOUND_BIT_BYTES(count)
#define PGBUF_IS_CLEAN_WATCHER(w)
Definition: page_buffer.h:151
bool spage_reclaim(THREAD_ENTRY *thread_p, PAGE_PTR page_p)
int heap_scancache_end_when_scan_will_resume(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:7092
PAGE_TYPE
int heap_get_hfid_and_file_type_from_class_oid(THREAD_ENTRY *thread_p, const OID *class_oid, HFID *hfid_out, FILE_TYPE *ftype_out)
Definition: heap_file.c:16587
#define PTR_ALIGN(addr, boundary)
Definition: memory_alloc.h:78
int db_make_string_copy(DB_VALUE *value, const char *str)
int * offset
Definition: expat.h:891
#define HEAP_SCANCACHE_SET_NODE(scan_cache, class_oid_p, hfid_p)
Definition: heap_file.h:77
struct func_pred_unpack_info FUNC_PRED_UNPACK_INFO
Definition: heap_file.h:198
MVCCID heap_page_get_max_mvccid(THREAD_ENTRY *thread_p, PAGE_PTR heap_page)
Definition: heap_file.c:23601
void or_class_hfid(RECDES *record, HFID *hfid)
enum tp_domain_status TP_DOMAIN_STATUS
#define er_log_debug(...)
void spage_set_need_update_best_hint(THREAD_ENTRY *thread_p, PAGE_PTR page_p, bool need_update)
Definition: slotted_page.c:981
PGSLOTID spage_delete_for_recovery(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id)
int idx
Definition: heap_file.c:300
#define HEAP_CLASSREPR_MAXCACHE
Definition: heap_file.c:84
int pgbuf_ordered_fix_debug(THREAD_ENTRY *thread_p, const VPID *req_vpid, PAGE_FETCH_MODE fetch_mode, const PGBUF_LATCH_MODE request_mode, PGBUF_WATCHER *req_watcher, const char *caller_file, int caller_line)
int force_decache
Definition: heap_file.c:304
int or_mvcc_set_log_lsa_to_record(RECDES *record, LOG_LSA *lsa)
bool heap_does_exist(THREAD_ENTRY *thread_p, OID *class_oid, const OID *oid)
Definition: heap_file.c:8606
void * mht_get2(const MHT_TABLE *ht, const void *key, void **last)
Definition: memory_hash.c:1301
int heap_scancache_end(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:7077
HEAP_CLASSREPR_LOCK * lock_next
Definition: heap_file.c:323
#define ER_HEAP_FOUND_NOT_VACUUMED
Definition: error_code.h:1514
#define OR_VAR_OFFSET(obj, index)
HEAP_CHNGUESS_ENTRY * entries
Definition: heap_file.c:446
int file_dump(THREAD_ENTRY *thread_p, const VFID *vfid, FILE *fp)
OID oid
Definition: heap_file.c:436
VPID next_vpid
Definition: heap_file.c:263
int db_make_elo(DB_VALUE *value, DB_TYPE type, const DB_ELO *elo)
#define OR_GET_MVCCID
bool spage_is_slot_exist(PAGE_PTR page_p, PGSLOTID slot_id)
#define MAX_ALIGNMENT
Definition: memory_alloc.h:71
#define ER_ALTER_CHANGE_CAST_FAILED_SET_DEFAULT
Definition: error_code.h:1318
unsigned int oid_hash(const void *key_oid, unsigned int htsize)
Definition: oid.c:294
REPR_ID last_reprid
Definition: heap_file.c:316
int heap_attrinfo_start_with_index(THREAD_ENTRY *thread_p, OID *class_oid, RECDES *class_recdes, HEAP_CACHE_ATTRINFO *attr_info, HEAP_IDX_ELEMENTS_INFO *idx_info)
Definition: heap_file.c:11854
#define PGLENGTH_MAX
#define COPY_OID(dest_oid_ptr, src_oid_ptr)
Definition: oid.h:64
#define heap_scan_pb_lock_and_fetch(...)
Definition: heap_file.c:567
#define pthread_mutex_trylock(a)
Definition: heap_file.c:69
SCAN_CODE spage_get_record(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id, RECDES *record_descriptor_p, int is_peeking)
HEAP_SCANCACHE_NODE_LIST * partition_list
Definition: heap_file.h:148
#define COPY
int heap_objects_capacity
Definition: vacuum.h:132
char * or_pack_mvccid(char *ptr, const MVCCID mvccid)
int mvcc_header_size_lookup[8]
#define DBVAL_BUFSIZE
Definition: btree.h:442
bool spage_is_updatable(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id, int record_descriptor_length)
DB_DOMAIN_INFO domain
Definition: dbtype_def.h:1107
DB_ELO * db_get_elo(const DB_VALUE *value)
PGBUF_WATCHER fwd_page_watcher
Definition: heap_file.h:347
int xlogtb_reset_wait_msecs(THREAD_ENTRY *thread_p, int wait_msecs)
#define QSTR_IS_BIT(s)
Definition: string_opfunc.h:45
#define vacuum_er_log_warning(er_log_level, msg,...)
Definition: vacuum.h:71
#define OR_MVCC_FLAG_VALID_PREV_VERSION
int db_elo_delete(DB_ELO *elo)
Definition: db_elo.c:115
SCAN_CODE heap_get_mvcc_header(THREAD_ENTRY *thread_p, HEAP_GET_CONTEXT *context, MVCC_REC_HEADER *mvcc_header)
Definition: heap_file.c:7509
#define ER_HEAP_MISMATCH_NPAGES
Definition: error_code.h:706
int file_create_with_npages(THREAD_ENTRY *thread_p, FILE_TYPE file_type, int npages, FILE_DESCRIPTORS *des, VFID *vfid)
#define LF_HASH_TABLE_INITIALIZER
Definition: lock_free.h:364
VPID second_best[HEAP_NUM_BEST_SPACESTATS]
Definition: heap_file.c:207
#define VFID_ISNULL(vfid_ptr)
Definition: file_manager.h:72
SCAN_CODE heap_next_record_info(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *next_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking, DB_VALUE **cache_recordinfo)
Definition: heap_file.c:18530
void THREAD_ENTRY
int overflow_update(THREAD_ENTRY *thread_p, const VFID *ovf_vfid, const VPID *ovf_vpid, RECDES *recdes, FILE_TYPE file_type)
#define NULL_PAGEID
#define QSTR_IS_CHAR(s)
Definition: string_opfunc.h:41
HFID hfid
Definition: heap_file.c:222
#define MVCCID_ALL_VISIBLE
#define pgbuf_unfix_and_init(thread_p, pgptr)
Definition: page_buffer.h:61
HEAP_SCANCACHE_NODE node
Definition: heap_file.h:135
#define OR_VAR_IS_NULL(obj, index)
#define MVCC_GET_PREV_VERSION_LSA(header)
Definition: mvcc.h:193
#define MVCC_SET_INSID(header, mvcc_id)
Definition: mvcc.h:36
int reserve1_for_future
Definition: heap_file.c:215
#define OR_MVCC_INSERT_HEADER_SIZE
#define ER_HEAP_UNABLE_TO_CREATE_HEAP
Definition: error_code.h:97
int spage_get_record_length(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id)
#define CT_SERIAL_NAME
Definition: transform.h:136
char * vpid_to_string(char *buf, int buf_size, VPID *vpid)
int heap_chnguess_put(THREAD_ENTRY *thread_p, const OID *oid, int tran_index, int chn)
Definition: heap_file.c:15157
void vacuum_log_add_dropped_file(THREAD_ENTRY *thread_p, const VFID *vfid, const OID *class_oid, bool pospone_or_undo)
Definition: vacuum.c:6072
#define HEAP_NUM_BEST_SPACESTATS
Definition: heap_file.c:170
int ATTR_ID
SCAN_CODE heap_attrinfo_transform_to_disk_except_lob(THREAD_ENTRY *thread_p, HEAP_CACHE_ATTRINFO *attr_info, RECDES *old_recdes, RECDES *new_recdes)
Definition: heap_file.c:11397
#define LOG_FIND_CURRENT_TDES(thrd)
Definition: log_impl.h:953
Definition: heap_file.h:173
int pr_free_ext_value(DB_VALUE *value)
int spage_update(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id, const RECDES *record_descriptor_p)
LOCK
int heap_attrinfo_set_uninitialized_global(THREAD_ENTRY *thread_p, OID *inst_oid, RECDES *recdes, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:16545
LF_HASH_TABLE hfid_hash
Definition: heap_file.h:164
MIN_MAX_COLUMN_TYPE type
Definition: dbtype_def.h:882
unsigned int of_del_tran_id
Definition: lock_free.h:73
PGBUF_WATCHER home_page_watcher
Definition: heap_file.h:272
BTREE_SEARCH xbtree_find_unique(THREAD_ENTRY *thread_p, BTID *btid, SCAN_OPERATION_TYPE scan_op_type, DB_VALUE *key, OID *class_oid, OID *oid, bool is_all_class_srch)
Definition: btree.c:23872
#define OID_PSEUDO_KEY(oidp)
Definition: oid.h:131
int heap_scancache_start(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, const HFID *hfid, const OID *class_oid, int cache_last_fix_page, int is_indexscan, MVCC_SNAPSHOT *mvcc_snapshot)
Definition: heap_file.c:6689
#define HEAP_NBYTES_CLEARED(byte_ptr, byte_cnt)
Definition: heap_file.c:413
DB_MONETARY * db_get_monetary(const DB_VALUE *value)
#define ER_UNEXPECTED
Definition: error_code.h:1250
HEAP_CLASSREPR_ENTRY * hash_next
Definition: heap_file.c:307
void mht_destroy(MHT_TABLE *ht)
Definition: memory_hash.c:1079
bool pr_is_set_type(DB_TYPE type)
int file_tracker_interruptable_iterate(THREAD_ENTRY *thread_p, FILE_TYPE desired_ftype, VFID *vfid, OID *class_oid)
int file_descriptor_get(THREAD_ENTRY *thread_p, const VFID *vfid, FILE_DESCRIPTORS *desc_out)
#define HEAP_CHK_ADD_UNFOUND_RELOCOIDS
Definition: heap_file.c:268
int boot_find_root_heap(HFID *root_hfid_p)
Definition: boot_sr.c:310
PAGE_TYPE pgbuf_get_page_ptype(THREAD_ENTRY *thread_p, PAGE_PTR pgptr)
Definition: page_buffer.c:4610
#define HEAP_GUESS_NUM_INDEXED_ATTRS
Definition: heap_file.c:82
int file_alloc_sticky_first_page(THREAD_ENTRY *thread_p, const VFID *vfid, FILE_INIT_PAGE_FUNC f_init, void *f_init_args, VPID *vpid_out, PAGE_PTR *page_out)
int heap_attrinfo_start(THREAD_ENTRY *thread_p, const OID *class_oid, int requested_num_attrs, const ATTR_ID *attrids, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:9319
int spage_max_record_size(void)
Definition: slotted_page.c:867
int heap_scancache_quick_start_modify(HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:6929
int spage_check(THREAD_ENTRY *thread_p, PAGE_PTR page_p)
void spage_dump(THREAD_ENTRY *thread_p, FILE *fp, PAGE_PTR page_p, int is_record_printed)
#define RECDES_INITIALIZER
int lf_hash_init(LF_HASH_TABLE *table, LF_FREELIST *freelist, unsigned int hash_size, LF_ENTRY_DESCRIPTOR *edesc)
Definition: lock_free.c:1875
#define MVCCID_IS_NORMAL(id)
#define HEAP_BESTSPACE_SYNC_THRESHOLD
Definition: heap_file.c:74
TP_DOMAIN * tp_domain_resolve_default(DB_TYPE type)
void er_set(int severity, const char *file_name, const int line_no, int err_id, int num_args,...)
int spage_max_space_for_new_record(THREAD_ENTRY *thread_p, PAGE_PTR page_p)
#define HEAP_STATS_ENTRY_FREELIST_SIZE
Definition: heap_file.c:87
OID * oid_User_class_oid
Definition: oid.c:78
void logpb_flush_pages_direct(THREAD_ENTRY *thread_p)
int db_set_get(DB_SET *set, int index, DB_VALUE *value)
Definition: db_set.c:503
PGBUF_WATCHER * forward_page_watcher_p
Definition: heap_file.h:281
char * fileio_get_volume_label(VOLID vol_id, bool is_peek)
Definition: file_io.c:6090
#define ER_QPROC_INVALID_PARAMETER
Definition: error_code.h:959
#define HEAP_HEADER_AND_CHAIN_SLOTID
Definition: heap_file.h:56
int heap_attrinfo_read_dbvalues_without_oid(THREAD_ENTRY *thread_p, RECDES *recdes, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10294
PAGE_FETCH_MODE
Definition: page_buffer.h:158
PAGE_PTR pgptr
Definition: recovery.h:192
NON_EXISTENT_HANDLING
bool pgbuf_has_prevent_dealloc(PAGE_PTR pgptr)
void log_skip_logging(THREAD_ENTRY *thread_p, LOG_DATA_ADDR *addr)
Definition: log_manager.c:3270
struct or_partition OR_PARTITION
int heap_rv_redo_insert(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15420
HEAP_PAGE_VACUUM_STATUS heap_page_get_vacuum_status(THREAD_ENTRY *thread_p, PAGE_PTR heap_page)
Definition: heap_file.c:23633
#define OR_BYTE_SIZE
SCAN_CODE heap_page_prev(THREAD_ENTRY *thread_p, const OID *class_oid, const HFID *hfid, VPID *prev_vpid, DB_VALUE **cache_pageinfo)
Definition: heap_file.c:18218
REGU_VARIABLE * func_regu
Definition: xasl.h:289
int heap_rv_mvcc_redo_delete_home(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15866
#define DB_NEED_CLEAR(v)
Definition: dbtype.h:81
#define ER_LK_PAGE_TIMEOUT
Definition: error_code.h:130
LF_ENTRY_KEY_COPY_FUNC f_key_copy
Definition: lock_free.h:97
int heap_get_mvcc_rec_header_from_overflow(PAGE_PTR ovf_page, MVCC_REC_HEADER *mvcc_header, RECDES *peek_recdes)
Definition: heap_file.c:18652
#define pthread_mutex_destroy(a)
Definition: heap_file.c:67
int n_heap_objects
Definition: vacuum.h:133
#define ER_LC_UNKNOWN_CLASSNAME
Definition: error_code.h:117
int spage_check_slot_owner(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id)
int32_t fileid
Definition: dbtype_def.h:911
#define HEAP_STATS_ENTRY_MHT_EST_SIZE
Definition: heap_file.c:86
bool pgbuf_check_page_ptype(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, PAGE_TYPE ptype)
#define OR_ENABLE_BOUND_BIT(bitptr, element)
#define ER_SP_NOSPACE_IN_PAGE
Definition: error_code.h:93
#define OR_MVCC_INSERT_ID_OFFSET
#define AUTO_INCREMENT_SERIAL_NAME_MAX_LENGTH
Definition: transform.h:180
int file_get_num_user_pages(THREAD_ENTRY *thread_p, const VFID *vfid, int *n_user_pages_out)
void heap_clean_get_context(THREAD_ENTRY *thread_p, HEAP_GET_CONTEXT *context)
Definition: heap_file.c:24482
#define PGBUF_WATCHER_RESET_RANK(w, rank)
Definition: page_buffer.h:107
DISK_ISVALID vacuum_check_not_vacuumed_recdes(THREAD_ENTRY *thread_p, OID *oid, OID *class_oid, RECDES *recdes, int btree_node_type)
Definition: vacuum.c:7304
SCAN_CODE heap_scan_get_visible_version(THREAD_ENTRY *thread_p, const OID *oid, OID *class_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking, int old_chn)
Definition: heap_file.c:24134
int prm_get_integer_value(PARAM_ID prm_id)
int heap_rv_mvcc_redo_redistribute(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:23851
#define ER_GENERIC_ERROR
Definition: error_code.h:45
int heap_scanrange_start(THREAD_ENTRY *thread_p, HEAP_SCANRANGE *scan_range, const HFID *hfid, const OID *class_oid, MVCC_SNAPSHOT *mvcc_snapshot)
Definition: heap_file.c:8094
DISK_ISVALID heap_check_all_heaps(THREAD_ENTRY *thread_p)
Definition: heap_file.c:13847
OR_DEFAULT_VALUE default_value
#define OID_IS_ROOTOID(oidp)
Definition: oid.h:83
PGBUF_LATCH_MODE
Definition: page_buffer.h:174
unsigned int record_length
Definition: slotted_page.h:89
int tp_domain_status_er_set(TP_DOMAIN_STATUS status, const char *file_name, const int line_no, const DB_VALUE *src, const TP_DOMAIN *domain)
LF_ENTRY_KEY_COMPARE_FUNC f_key_cmp
Definition: lock_free.h:100
void heap_classrepr_dump_all(THREAD_ENTRY *thread_p, FILE *fp, OID *class_oid)
Definition: heap_file.c:16728
DB_IDENTIFIER OID
Definition: dbtype_def.h:992
BTID * heap_indexinfo_get_btid(int btid_index, HEAP_CACHE_ATTRINFO *attrinfo)
Definition: heap_file.c:12841
unsigned int of_next
Definition: lock_free.h:70
LC_FIND_CLASSNAME xlocator_find_class_oid(THREAD_ENTRY *thread_p, const char *classname, OID *class_oid, LOCK lock)
Definition: locator_sr.c:1023
OR_ATTRIBUTE * heap_locate_last_attrepr(ATTR_ID attrid, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10531
void heap_rv_dump_reuse_page(FILE *fp, int ignore_length, void *ignore_data)
Definition: heap_file.c:16220
#define ER_IT_DATA_OVERFLOW
Definition: error_code.h:501
#define ER_OUT_OF_VIRTUAL_MEMORY
Definition: error_code.h:46
#define FREE(p)
Definition: dict_private.h:66
void log_append_undo_data(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int length, const void *data)
Definition: log_manager.c:1947
LPOLESTR DBORDINAL flags dbtype
int or_chn(RECDES *record)
DB_TYPE db_value_type(const DB_VALUE *value)
int orc_superclasses_from_record(RECDES *record, int *array_size, OID **array_ptr)
#define LF_ENTRY_DESCRIPTOR_INITIALIZER
Definition: lock_free.h:110
int heap_get_class_name_alloc_if_diff(THREAD_ENTRY *thread_p, const OID *class_oid, char *guess_classname, char **classname_out)
Definition: heap_file.c:9243
VPID full_search_vpid
Definition: heap_file.c:206
#define OR_VALUE_ALIGNED_SIZE(value)
PGBUF_LATCH_CONDITION
Definition: page_buffer.h:183
#define SINGLE_ROW_UPDATE
Definition: btree.h:47
int heap_get_hfid_from_vfid(THREAD_ENTRY *thread_p, const VFID *vfid, HFID *hfid)
Definition: heap_file.c:24701
unsigned is_desc
lf_tran_entry * thread_get_tran_entry(cubthread::entry *thread_p, int entry_idx)
#define ER_HF_MAX_BESTSPACE_ENTRIES
Definition: error_code.h:1364
int reserve0_for_future
Definition: heap_file.c:214
bool oid_is_root(const OID *oid)
Definition: oid.c:135
#define OR_FIXED_ATTRIBUTES_OFFSET_BY_OBJ(obj, nvars)
Definition: heap_file.c:78
#define pgbuf_replace_watcher(thread_p, old_watcher, new_watcher)
Definition: page_buffer.h:328
STATIC_INLINE void LSA_COPY(LOG_LSA *plsa1, const LOG_LSA *plsa2)
#define ER_HEAP_WRONG_ATTRINFO
Definition: error_code.h:747
int or_put_int(OR_BUF *buf, int num)
int heap_update_logical(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context)
Definition: heap_file.c:22691
BTREE_SEARCH
pthread_mutex_t bestspace_mutex
Definition: heap_file.c:466
int intl_identifier_casecmp(const char *str1, const char *str2)
int num_other_high_best
Definition: heap_file.c:192
int numeric_db_value_coerce_from_num(DB_VALUE *src, DB_VALUE *dest, DB_DATA_STATUS *data_status)
#define DB_VALUE_DOMAIN_TYPE(value)
Definition: dbtype.h:68
HEAP_ATTR_TYPE attr_type
Definition: heap_attrinfo.h:55
OR_CLASSREP ** repr
Definition: heap_file.c:314
void spage_collect_statistics(PAGE_PTR page_p, int *npages, int *nrecords, int *rec_length)
#define VACUUM_ER_LOG_HEAP
Definition: vacuum.h:48
unsigned int record_type
Definition: slotted_page.h:90
#define DB_MAX_IDENTIFIER_LENGTH
Definition: dbtype_def.h:522
#define INT64
void heap_create_update_context(HEAP_OPERATION_CONTEXT *context, HFID *hfid_p, OID *oid_p, OID *class_oid_p, RECDES *recdes_p, HEAP_SCANCACHE *scancache_p, UPDATE_INPLACE_STYLE in_place)
Definition: heap_file.c:22309
int heap_manager_initialize(void)
Definition: heap_file.c:4955
THREAD_ENTRY * next_wait_thrd
Definition: heap_file.c:324
LOG_PAGEID logical_pageid
Definition: log_impl.h:127
void heap_create_insert_context(HEAP_OPERATION_CONTEXT *context, HFID *hfid_p, OID *class_oid_p, RECDES *recdes_p, HEAP_SCANCACHE *scancache_p)
Definition: heap_file.c:22257
bool heap_remove_page_on_vacuum(THREAD_ENTRY *thread_p, PAGE_PTR *page_ptr, HFID *hfid)
Definition: heap_file.c:4528
int spage_compact(THREAD_ENTRY *thread_p, PAGE_PTR page_p)
#define TP_DOMAIN_COLLATION(dom)
int heap_rv_redo_update_and_update_chain(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:18873
#define OR_HEADER_SIZE(ptr)
int or_put_bigint(OR_BUF *buf, DB_BIGINT num)
void lock_unlock_object(THREAD_ENTRY *thread_p, const OID *oid, const OID *class_oid, LOCK lock, bool force)
#define MIN(a, b)
Definition: dict_private.h:69
int head_second_best
Definition: heap_file.c:201
int heap_rv_redo_newpage(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15304
#define VPID_EQ(vpid_ptr1, vpid_ptr2)
Definition: dbtype_def.h:940
#define VFID_INITIALIZER
Definition: dbtype_def.h:915
HEAP_ATTRVALUE * heap_attrvalue_locate(ATTR_ID attrid, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10484
DB_VALUE * heap_attrinfo_generate_key(THREAD_ENTRY *thread_p, int n_atts, int *att_ids, int *atts_prefix_length, HEAP_CACHE_ATTRINFO *attr_info, RECDES *recdes, DB_VALUE *db_valuep, char *buf, FUNCTION_INDEX_INFO *func_index_info)
Definition: heap_file.c:12522
int heap_get_num_objects(THREAD_ENTRY *thread_p, const HFID *hfid, int *npages, int *nobjs, int *avg_length)
Definition: heap_file.c:8827
int spage_mark_deleted_slot_as_reusable(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id)
int heap_insert_logical(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context)
Definition: heap_file.c:22356
#define HFID_SET_NULL(hfid)
#define OR_NON_MVCC_HEADER_SIZE
short volid
Definition: dbtype_def.h:905
HEAP_CLASSREPR_ENTRY * LRU_bottom
Definition: heap_file.c:341
#define ER_ALTER_CHANGE_CAST_FAILED_SET_MAX
Definition: error_code.h:1320
#define ER_QPROC_SIZE_STRING_TRUNCATED
Definition: error_code.h:1295
#define OID_EQ(oidp1, oidp2)
Definition: oid.h:93
#define OR_MVCC_PREV_VERSION_LSA_SIZE
#define OR_MVCC_FLAG_MASK
LOCK lock_Conv[11][11]
Definition: lock_table.c:147
DB_VALUE * db_value_copy(DB_VALUE *value)
Definition: db_macro.c:1531
#define heap_classrepr_free_and_init(class_repr, idxp)
Definition: heap_file.h:85
SCAN_CODE spage_get_page_header_info(PAGE_PTR page_p, DB_VALUE **page_header_info)
int heap_attrinfo_read_dbvalues(THREAD_ENTRY *thread_p, const OID *inst_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:10230
VFID vfid
int locator_attribute_info_force(THREAD_ENTRY *thread_p, const HFID *hfid, OID *oid, HEAP_CACHE_ATTRINFO *attr_info, ATTR_ID *att_id, int n_att_id, LC_COPYAREA_OPERATION operation, int op_type, HEAP_SCANCACHE *scan_cache, int *force_count, bool not_check_fk, REPL_INFO_TYPE repl_info, int pruning_type, PRUNING_CONTEXT *pcontext, FUNC_PRED_UNPACK_INFO *func_preds, MVCC_REEV_DATA *mvcc_reev_data, UPDATE_INPLACE_STYLE force_update_inplace, RECDES *rec_descriptor, bool need_locking)
Definition: locator_sr.c:7339
#define TP_DOMAIN_TYPE(dom)
int logpb_fetch_page(THREAD_ENTRY *thread_p, LOG_LSA *req_lsa, LOG_CS_ACCESS_MODE access_mode, LOG_PAGE *log_pgptr)
XML_EndCdataSectionHandler end
Definition: expat.h:541
#define OR_MVCC_FLAG_SHIFT_BITS
int overflow_get_capacity(THREAD_ENTRY *thread_p, const VPID *ovf_vpid, int *ovf_size, int *ovf_num_pages, int *ovf_overhead, int *ovf_free_space)
int tail_second_best
Definition: heap_file.c:202
int length
Definition: recovery.h:195
MVCC_SATISFIES_SNAPSHOT_RESULT mvcc_is_not_deleted_for_snapshot(THREAD_ENTRY *thread_p, MVCC_REC_HEADER *rec_header, MVCC_SNAPSHOT *snapshot)
Definition: mvcc.c:372
int heap_estimate(THREAD_ENTRY *thread_p, const HFID *hfid, int *npages, int *nobjs, int *avg_length)
Definition: heap_file.c:8897
#define ER_HEAP_UNKNOWN_HEAP
Definition: error_code.h:686
void * mht_get(MHT_TABLE *ht, const void *key)
Definition: memory_hash.c:1224
#define HEAP_HFID_HASH_SIZE
Definition: heap_file.h:169
int or_put_offset_internal(OR_BUF *buf, int num, int offset_size)
#define NULL
Definition: freelistheap.h:19
int file_alloc(THREAD_ENTRY *thread_p, const VFID *vfid, FILE_INIT_PAGE_FUNC f_init, void *f_init_args, VPID *vpid_out, PAGE_PTR *page_out)
int db_string_truncate(DB_VALUE *value, const int precision)
Definition: db_macro.c:957
RECDES * recdes_p
Definition: heap_file.h:342
UINT64 MVCCID
int heap_attrinfo_clear_dbvalues(HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:9914
int lf_hash_find_or_insert(LF_TRAN_ENTRY *tran, LF_HASH_TABLE *table, void *key, void **entry, int *inserted)
Definition: lock_free.c:2103
PGNSLOTS spage_number_of_records(PAGE_PTR page_p)
Definition: slotted_page.c:879
void db_value_fprint(FILE *fp, const DB_VALUE *value)
Definition: db_macro.c:1675
#define MVCC_IS_FLAG_SET(rec_header_p, flags)
Definition: mvcc.h:66
#define HEAP_LOG_MVCC_INSERT_MAX_REDO_CRUMBS
struct pr_type * type
Definition: object_domain.h:78
HEAP_CLASSREPR_LOCK * lock_table
Definition: heap_file.c:359
int heap_indexinfo_get_attrs_prefix_length(int btid_index, HEAP_CACHE_ATTRINFO *attrinfo, int *attrs_prefix_length, int len_attrs_prefix_length)
Definition: heap_file.c:12904
const char * pr_type_name(DB_TYPE id)
#define MAX(a, b)
Definition: dict_private.h:70
#define HEAP_GUESS_NUM_ATTRS_REFOIDS
Definition: heap_file.c:81
void tsc_getticks(TSC_TICKS *tck)
Definition: tsc_timer.c:82
OR_CLASSREP ** or_get_all_representation(RECDES *record, bool do_indexes, int *count)
HEAP_SCANCACHE scan_cache
Definition: heap_file.h:158
const VFID * vfid
Definition: log_impl.h:1839
int heap_scancache_quick_start_with_class_oid(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, OID *class_oid)
Definition: heap_file.c:19238
VFID vfid
int or_class_get_partition_info(RECDES *record, OR_PARTITION *partition_info, REPR_ID *repr_id, int *has_partition_info)
#define vacuum_er_log(er_log_level, msg,...)
Definition: vacuum.h:63
#define HEAP_DEBUG_ISVALID_SCANRANGE(scan_range)
Definition: heap_file.c:98
#define HEAP_STATS_PREV_BEST_INDEX(i)
Definition: heap_file.c:175
LF_TRAN_SYSTEM hfid_table_Ts
Definition: lock_free.c:57
#define LC_NEXT_ONEOBJ_PTR_IN_COPYAREA(oneobj_ptr)
Definition: locator.h:49
#define OR_MVCC_DELETE_ID_OFFSET(mvcc_flags)
int(* data_writeval)(OR_BUF *buf, DB_VALUE *value)
OR_ATTRIBUTE * read_attrepr
Definition: heap_attrinfo.h:57
void log_append_undoredo_recdes(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, const RECDES *undo_recdes, const RECDES *redo_recdes)
Definition: log_manager.c:2451
#define BEST_PAGE_SEARCH_MAX_COUNT
int heap_vpid_next(THREAD_ENTRY *thread_p, const HFID *hfid, PAGE_PTR pgptr, VPID *next_vpid)
Definition: heap_file.c:4868
#define BTID_SET_NULL(btid)
LC_FIND_CLASSNAME
unsigned int of_mutex
Definition: lock_free.h:79
int file_create_heap(THREAD_ENTRY *thread_p, bool reuse_oid, const OID *class_oid, VFID *vfid)
int db_value_domain_max(DB_VALUE *value, const DB_TYPE type, const int precision, const int scale, const int codeset, const int collation_id, const DB_ENUMERATION *enumeration)
Definition: db_macro.c:577
void * ptr
Definition: expat.h:904
PAGE_PTR pgptr
Definition: log_impl.h:1840
TP_DOMAIN * tp_domain_cache(TP_DOMAIN *transient)
LF_ENTRY_DUPLICATE_KEY_HANDLER f_duplicate
Definition: lock_free.h:107
int db_seq_free(DB_SEQ *seq)
Definition: db_set.c:318
#define err(fd,...)
Definition: porting.h:425
int heap_estimate_num_objects(THREAD_ENTRY *thread_p, const HFID *hfid)
Definition: heap_file.c:8952
#define ER_PB_BAD_PAGEID
Definition: error_code.h:63
#define db_private_free_and_init(thrd, ptr)
Definition: memory_alloc.h:142
int xserial_get_next_value(THREAD_ENTRY *thread_p, DB_VALUE *result_num, const OID *oid_p, int cached_num, int num_alloc, int is_auto_increment, bool force_set_last_insert_id)
Definition: serial.c:274
int num_second_best
Definition: heap_file.c:199
void log_append_undoredo_data(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int undo_length, int redo_length, const void *undo_data, const void *redo_data)
Definition: log_manager.c:1867
HEAP_STATS_ENTRY * next
Definition: heap_file.c:224
#define OR_MAX_BYTE
int lf_freelist_init(LF_FREELIST *freelist, int initial_blocks, int block_size, LF_ENTRY_DESCRIPTOR *edesc, LF_TRAN_SYSTEM *tran_system)
Definition: lock_free.c:668
#define pgbuf_fix(thread_p, vpid, fetch_mode, requestmode, condition)
Definition: page_buffer.h:246
void thread_lock_entry(cubthread::entry *thread_p)
#define HEAP_UPDATE_IS_MVCC_OP(is_mvcc_class, update_style)
Definition: heap_file.c:141
int numeric_db_value_is_positive(const DB_VALUE *dbvalue)
MHT_TABLE * mht_create(const char *name, int est_size, unsigned int(*hash_func)(const void *key, unsigned int ht_size), int(*cmp_func)(const void *key1, const void *key2))
Definition: memory_hash.c:916
#define MVCC_ID_PRECEDES(id1, id2)
Definition: mvcc.h:178
assert(k<=ptr->_max_k)
#define HEAP_PAGE_GET_VACUUM_STATUS(chain)
Definition: heap_file.c:250
PGLENGTH offset
Definition: log_impl.h:128
#define HEAP_LOG_MVCC_REDISTRIBUTE_MAX_REDO_CRUMBS
DISK_ISVALID disk_is_page_sector_reserved(THREAD_ENTRY *thread_p, VOLID volid, PAGEID pageid)
#define csect_exit(a, b)
Definition: cnv.c:139
#define db_private_free(thrd, ptr)
Definition: memory_alloc.h:230
void or_init(OR_BUF *buf, char *data, int length)
int heap_rv_update_chain_after_mvcc_op(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:23684
STATIC_INLINE int heap_copy_chain(THREAD_ENTRY *thread_p, PAGE_PTR page_heap, HEAP_CHAIN *chain) __attribute__((ALWAYS_INLINE))
Definition: heap_file.c:4032
#define db_private_alloc(thrd, size)
Definition: memory_alloc.h:228
SCAN_CODE spage_next_record_dont_skip_empty(PAGE_PTR page_p, PGSLOTID *out_slot_id_p, RECDES *record_descriptor_p, int is_peeking)
PGBUF_WATCHER page_watcher
Definition: heap_file.h:140
#define NULL_FILEID
PGSLOTID spage_find_free_slot(PAGE_PTR page_p, SPAGE_SLOT **out_slot_p, PGSLOTID start_slot)
const OID * heap_ovf_delete(THREAD_ENTRY *thread_p, const HFID *hfid, const OID *ovf_oid, VFID *ovf_vfid_p)
Definition: heap_file.c:6373
LPOLESTR DBORDINAL flags BYTE BYTE offsetof(_Class, member) \ }
INT32 flags
Definition: heap_file.c:265
unsigned char * bitindex
Definition: heap_file.c:447
#define NULL_OFFSET
int using_mutex
Definition: lock_free.h:82
#define BIG_VAR_OFFSET_SIZE
need_clear_type need_clear
Definition: dbtype_def.h:1109
bool logtb_set_check_interrupt(THREAD_ENTRY *thread_p, bool flag)
#define CEIL_PTVDIV(dividend, divisor)
Definition: memory_alloc.h:51
#define LSA_ISNULL(lsa_ptr)
HEAP_CLASSREPR_ENTRY * hash_next
Definition: heap_file.c:332
int heap_rv_redo_delete(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15651
int db_set_size(DB_SET *set)
Definition: db_set.c:552
OR_CLASSREP * heap_classrepr_get(THREAD_ENTRY *thread_p, const OID *class_oid, RECDES *class_recdes, REPR_ID reprid, int *idx_incache)
Definition: heap_file.c:2187
void er_set_with_oserror(int severity, const char *file_name, const int line_no, int err_id, int num_args,...)
void heap_attrinfo_dump(THREAD_ENTRY *thread_p, FILE *fp, HEAP_CACHE_ATTRINFO *attr_info, bool dump_schema)
Definition: heap_file.c:10434
LC_COPYAREA_ONEOBJ ** obj
Definition: locator.h:250
VFID * heap_ovf_find_vfid(THREAD_ENTRY *thread_p, const HFID *hfid, VFID *ovf_vfid, bool docreate, PGBUF_LATCH_CONDITION latch_cond)
Definition: heap_file.c:6220
HEAP_BESTSPACE best
Definition: heap_file.c:223
#define IO_DEFAULT_PAGE_SIZE
MHT_TABLE * ht
Definition: heap_file.c:445
int pr_clear_value(DB_VALUE *value)
void heap_scanrange_end(THREAD_ENTRY *thread_p, HEAP_SCANRANGE *scan_range)
Definition: heap_file.c:8129
SCAN_CODE heap_scanrange_to_following(THREAD_ENTRY *thread_p, HEAP_SCANRANGE *scan_range, OID *start_oid)
Definition: heap_file.c:8156
DB_BIGINT db_get_bigint(const DB_VALUE *value)
int heap_scan_cache_allocate_area(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache_p, int size)
Definition: heap_file.c:24569
struct btree_unique_stats BTREE_UNIQUE_STATS
pthread_mutex_t mutex
Definition: heap_file.c:299
char home_recdes_buffer[IO_MAX_PAGE_SIZE+MAX_ALIGNMENT]
Definition: heap_file.h:267
RECDES * recdes
Definition: locator.h:252
void pgbuf_get_vpid(PAGE_PTR pgptr, VPID *vpid)
Definition: page_buffer.c:4514
#define HEAP_NBYTES_TO_NBITS(byte_cnt)
Definition: heap_file.c:412
int db_value_domain_default(DB_VALUE *value, const DB_TYPE type, const int precision, const int scale, const int codeset, const int collation_id, DB_ENUMERATION *enumeration)
Definition: db_macro.c:751
size
HEAP_SCANCACHE_NODE node
Definition: heap_file.h:127
#define VFID_COPY(vfid_ptr1, vfid_ptr2)
Definition: file_manager.h:69
pthread_mutex_t free_mutex
Definition: heap_file.c:347
int heap_rv_redo_mark_reusable_slot(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15985
#define LF_FREELIST_INITIALIZER
Definition: lock_free.h:295
void log_sysop_abort(THREAD_ENTRY *thread_p)
Definition: log_manager.c:4097
#define NULL_REPRID
FILE_HEAP_DES heap
Definition: file_manager.h:132
#define MVCC_SET_FLAG_BITS(rec_header_p, flag)
Definition: mvcc.h:77
int heap_vacuum_all_objects(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *upd_scancache, MVCCID threshold_mvccid)
Definition: heap_file.c:23166
void er_stack_pop(void)
void heap_chnguess_clear(THREAD_ENTRY *thread_p, int tran_index)
Definition: heap_file.c:15267
SCAN_CODE heap_first(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking)
Definition: heap_file.c:7988
#define ER_ALTER_CHANGE_CAST_FAILED_SET_MIN
Definition: error_code.h:1319
struct db_domain_info::general_info general_info
HEAP_CLASSREPR_ENTRY * next
Definition: heap_file.c:309
void heap_create_delete_context(HEAP_OPERATION_CONTEXT *context, HFID *hfid_p, OID *oid_p, OID *class_oid_p, HEAP_SCANCACHE *scancache_p)
Definition: heap_file.c:22283
TP_DOMAIN * domain
Definition: regu_var.h:419
#define HEAP_NBITS_TO_NBYTES(bit_cnt)
Definition: heap_file.c:410
#define OR_CLEAR_BOUND_BIT(bitptr, element)
#define OR_CHN_OFFSET
OR_CLASSREP * or_get_classrep(RECDES *record, int repid)
MVCCID logtb_get_current_mvccid(THREAD_ENTRY *thread_p)
#define CAST_BUFLEN
Definition: porting.h:465
int ncolumns
Definition: dbtype_def.h:889
#define OR_VAR_LENGTH(length, obj, index, n_variables)
DB_ENUMERATION enumeration
Definition: object_domain.h:86
#define TP_IS_CHAR_TYPE(typeid)
PGNSLOTS spage_number_of_slots(PAGE_PTR page_p)
Definition: slotted_page.c:898
TP_DOMAIN_COLL_ACTION collation_flag
Definition: object_domain.h:96
int heap_get_index_with_name(THREAD_ENTRY *thread_p, OID *class_oid, const char *index_name, BTID *btid)
Definition: heap_file.c:12942
error($message)
Definition: run-tests.php:2122
int heap_rv_redo_reuse_page_reuse_oid(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16173
int file_descriptor_dump(THREAD_ENTRY *thread_p, const VFID *vfid, FILE *fp)
int heap_classrepr_decache(THREAD_ENTRY *thread_p, const OID *class_oid)
Definition: heap_file.c:1739
LF_ENTRY_HASH_FUNC f_hash
Definition: lock_free.h:103
int file_init_page_type(THREAD_ENTRY *thread_p, PAGE_PTR page, void *args)
void log_append_postpone(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, LOG_DATA_ADDR *addr, int length, const void *data)
Definition: log_manager.c:2720
#define VPID_ISNULL(vpid_ptr)
Definition: dbtype_def.h:950
#define HEAP_MVCC_SET_HEADER_MAXIMUM_SIZE(mvcc_rec_header_p)
Definition: heap_file.c:116
DB_CLASS_PARTITION_TYPE
Definition: dbdef.h:64
const char * data
Definition: recovery.h:196
#define ER_SP_INVALID_HEADER
Definition: error_code.h:1439
int heap_chnguess_get(THREAD_ENTRY *thread_p, const OID *oid, int tran_index)
Definition: heap_file.c:15106
int heap_classrepr_find_index_id(OR_CLASSREP *classrepr, const BTID *btid)
Definition: heap_file.c:12050
void log_append_undo_recdes2(THREAD_ENTRY *thread_p, LOG_RCVINDEX rcvindex, const VFID *vfid, PAGE_PTR pgptr, PGLENGTH offset, const RECDES *recdes)
Definition: log_manager.c:2557
int heap_prefetch(THREAD_ENTRY *thread_p, OID *class_oid, const OID *oid, LC_COPYAREA_DESC *prefetch)
Definition: heap_file.c:13362
int xheap_destroy(THREAD_ENTRY *thread_p, const HFID *hfid, const OID *class_oid)
Definition: heap_file.c:5658
STATIC_INLINE void perfmon_inc_stat(THREAD_ENTRY *thread_p, PERF_STAT_ID psid) __attribute__((ALWAYS_INLINE))
#define ER_INTERRUPTED
Definition: error_code.h:47
int heap_get_btid_from_index_name(THREAD_ENTRY *thread_p, const OID *p_class_oid, const char *index_name, BTID *p_found_btid)
Definition: heap_file.c:16792
void file_postpone_destroy(THREAD_ENTRY *thread_p, const VFID *vfid)
#define pthread_mutex_unlock(a)
Definition: heap_file.c:70
TP_DOMAIN * tp_domain_construct(DB_TYPE domain_type, DB_OBJECT *class_obj, int precision, int scale, TP_DOMAIN *setdomain)
int heap_set_mvcc_rec_header_on_overflow(PAGE_PTR ovf_page, MVCC_REC_HEADER *mvcc_header)
Definition: heap_file.c:18678
PGBUF_WATCHER forward_page_watcher
Definition: heap_file.h:275
#define true
Definition: system.h:94
PGSLOTID spage_delete(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id)
SCAN_CODE heap_get_class_oid(THREAD_ENTRY *thread_p, const OID *oid, OID *class_oid)
Definition: heap_file.c:9177
int db_make_midxkey(DB_VALUE *value, DB_MIDXKEY *midxkey)
#define LOG_FIND_THREAD_TRAN_INDEX(thrd)
Definition: perf_monitor.h:157
int heap_classrepr_free(OR_CLASSREP *classrep, int *idx_incache)
Definition: heap_file.c:1781
void overflow_flush(THREAD_ENTRY *thread_p, const VPID *ovf_vpid)
#define MVCC_GET_FLAG(header)
Definition: mvcc.h:57
#define HFID_IS_NULL(hfid)
#define ER_PB_UNEXPECTED_PAGE_REFIX
Definition: error_code.h:1521
#define free(p)
Definition: leak_detector.h:5
bool db_value_is_null(const DB_VALUE *value)
char * or_class_name(RECDES *record)
return k
Definition: mprec.c:281
#define LOG_DATA_ADDR_INITIALIZER
Definition: log_impl.h:1844
#define ARG_FILE_LINE
Definition: error_manager.h:45
#define OR_FIXED_ATT_IS_UNBOUND(obj, nvars, fsize, position)
STATIC_INLINE HEAP_HDR_STATS * heap_get_header_stats_ptr(THREAD_ENTRY *thread_p, PAGE_PTR page_header) __attribute__((ALWAYS_INLINE))
Definition: heap_file.c:3971
#define QSTR_IS_NATIONAL_CHAR(s)
Definition: string_opfunc.h:43
VOLID pgbuf_get_volume_id(PAGE_PTR pgptr)
Definition: page_buffer.c:4642
#define HEAP_PERF_START(thread_p, context)
Definition: heap_file.c:501
LF_ENTRY_ALLOC_FUNC f_alloc
Definition: lock_free.h:85
#define HEAP_DROP_FREE_SPACE
Definition: heap_file.c:90
int pr_clone_value(const DB_VALUE *src, DB_VALUE *dest)
int heap_rv_mvcc_undo_delete(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15718
int heap_rv_redo_update(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16074
int heap_rv_mvcc_redo_delete_overflow(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15906
OID * db_get_oid(const DB_VALUE *value)
int log_add_to_modified_class_list(THREAD_ENTRY *thread_p, const char *classname, const OID *class_oid)
Definition: log_manager.c:4800
#define ER_HEAP_UNKNOWN_ATTRS
Definition: error_code.h:748
void heap_init_get_context(THREAD_ENTRY *thread_p, HEAP_GET_CONTEXT *context, const OID *oid, OID *class_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking, int old_chn)
Definition: heap_file.c:24522
#define ER_ALTER_CHANGE_TRUNC_OVERFLOW_NOT_ALLOWED
Definition: error_code.h:1317
SCAN_CODE heap_attrinfo_transform_to_disk(THREAD_ENTRY *thread_p, HEAP_CACHE_ATTRINFO *attr_info, RECDES *old_recdes, RECDES *new_recdes)
Definition: heap_file.c:11377
#define OR_OID_SIZE
int logpb_prior_lsa_append_all_list(THREAD_ENTRY *thread_p)
OID class_oid
Definition: heap_file.c:261
float prm_get_float_value(PARAM_ID prm_id)
unsigned char * bits
Definition: heap_file.c:437
int zone
Definition: heap_file.c:303
const void * mht_put_new(MHT_TABLE *ht, const void *key, void *data)
Definition: memory_hash.c:1487
int heap_delete_logical(THREAD_ENTRY *thread_p, HEAP_OPERATION_CONTEXT *context)
Definition: heap_file.c:22525
#define OR_MVCC_FLAG_VALID_INSID
int or_mvcc_set_header(RECDES *record, MVCC_REC_HEADER *mvcc_rec_header)
LOG_LSA prev_version_lsa
#define VACUUM_LOG_ADD_DROPPED_FILE_POSTPONE
Definition: vacuum.h:76
unsigned int offset_to_record
Definition: slotted_page.h:88
#define BTREE_IS_MULTI_ROW_OP(op)
Definition: btree.h:53
OR_CLASSREP * last_classrepr
Definition: heap_attrinfo.h:69
#define OR_GET_INT(ptr)
MVCC_SNAPSHOT * mvcc_snapshot
Definition: heap_file.h:147
unsigned int mht_count(const MHT_TABLE *ht)
Definition: memory_hash.c:2017
#define LOG_CS_EXIT(thread_p)
Definition: log_impl.h:814
INT16 PGSLOTID
DB_DATA_STATUS
#define pthread_mutex_init(a, b)
Definition: heap_file.c:66
PGBUF_WATCHER * home_page_watcher_p
Definition: heap_file.h:278
#define MVCC_SET_REPID(header, rep_id)
Definition: mvcc.h:48
STATIC_INLINE void LSA_SET_NULL(LOG_LSA *lsa_ptr)
int xheap_create(THREAD_ENTRY *thread_p, HFID *hfid, const OID *class_oid, bool reuse_oid)
Definition: heap_file.c:5644
int stx_map_stream_to_func_pred(THREAD_ENTRY *thread_p, FUNC_PRED **xasl, char *xasl_stream, int xasl_stream_size, void **xasl_unpack_info_ptr)
bool vacuum_is_mvccid_vacuumed(MVCCID id)
Definition: vacuum.c:7410
#define free_and_init(ptr)
Definition: memory_alloc.h:148
int heap_scancache_quick_start_with_class_hfid(THREAD_ENTRY *thread_p, HEAP_SCANCACHE *scan_cache, const HFID *hfid)
Definition: heap_file.c:19264
#define OR_OFFSET_SIZE_FLAG
bool heap_attrinfo_check_unique_index(THREAD_ENTRY *thread_p, HEAP_CACHE_ATTRINFO *attr_info, ATTR_ID *att_id, int n_att_id)
Definition: heap_file.c:18912
#define DB_ALIGN(offset, align)
Definition: memory_alloc.h:85
#define strlen(s1)
Definition: intl_support.c:45
#define BTID_COPY(btid_ptr1, btid_ptr2)
SCAN_CODE spage_previous_record(PAGE_PTR page_p, PGSLOTID *out_slot_id_p, RECDES *record_descriptor_p, int is_peeking)
#define HEAP_BIT_CLEAR(byte_ptr, bit_num)
Definition: heap_file.c:426
int or_get_attrname(RECDES *record, int attrid, char **string, int *alloced_string)
int heap_initialize_hfid_table(void)
Definition: heap_file.c:23068
#define OR_GET_MVCC_FLAG(ptr)
int oid_compare_equals(const void *key_oid1, const void *key_oid2)
Definition: oid.c:310
#define HEAP_CHKRELOC_UNFOUND_SHORT
Definition: heap_file.c:14391
void heap_dump(THREAD_ENTRY *thread_p, FILE *fp, HFID *hfid, bool dump_records)
Definition: heap_file.c:13964
#define MVCC_SET_CHN(header, chn_)
Definition: mvcc.h:54
int fcnt
Definition: heap_file.c:301
HEAP_FINDSPACE
Definition: heap_file.c:147
#define OR_VAR_TABLE_SIZE_INTERNAL(vars, offset_size)
int heap_get_class_partitions(THREAD_ENTRY *thread_p, const OID *class_oid, OR_PARTITION **parts, int *parts_count)
Definition: heap_file.c:10866
DISK_ISVALID heap_check_heap_file(THREAD_ENTRY *thread_p, HFID *hfid)
Definition: heap_file.c:13791
int heap_attrinfo_start_with_btid(THREAD_ENTRY *thread_p, OID *class_oid, BTID *btid, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:12094
#define ER_FILE_NOT_ENOUGH_PAGES_IN_DATABASE
Definition: error_code.h:84
#define ALWAYS_INLINE
Definition: porting.h:43
#define OR_GET_MVCC_CHN(ptr)
DB_DOMAIN * domain
Definition: dbtype_def.h:890
PGBUF_WATCHER * header_page_watcher_p
Definition: heap_file.h:280
#define DB_PAGESIZE
HFID hfid
Definition: heap_file.h:180
VPID prev_vpid
Definition: heap_file.c:262
#define ER_HEAP_UNKNOWN_OBJECT
Definition: error_code.h:98
#define MVCC_GET_DELID(header)
Definition: mvcc.h:39
DB_VALUE dbvalue
Definition: heap_attrinfo.h:58
void lf_freelist_destroy(LF_FREELIST *freelist)
Definition: lock_free.c:713
PGBUF_WATCHER header_page_watcher
Definition: heap_file.h:274
int mht_rem2(MHT_TABLE *ht, const void *key, const void *data, int(*rem_func)(const void *key, void *data, void *args), void *func_args)
Definition: memory_hash.c:1835
HEAP_PAGE_VACUUM_STATUS
Definition: heap_file.h:326
void pgbuf_set_page_ptype(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, PAGE_TYPE ptype)
Definition: page_buffer.c:4817
bool heap_is_page_header(THREAD_ENTRY *thread_p, PAGE_PTR page)
Definition: heap_file.c:24727
bool prm_get_bool_value(PARAM_ID prm_id)
#define INT_ALIGNMENT
Definition: memory_alloc.h:62
#define QSTR_IS_ANY_CHAR_OR_BIT(s)
Definition: string_opfunc.h:48
#define HFID_EQ(hfid_ptr1, hfid_ptr2)
Definition: heap_file.h:42
OID class_oid
Definition: heap_file.c:312
int orc_subclasses_from_record(RECDES *record, int *array_size, OID **array_ptr)
int heap_rv_mvcc_undo_delete_overflow(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15771
HEAP_DIRECTION
Definition: heap_file.c:158
#define HEAP_BIT_GET(byte_ptr, bit_num)
Definition: heap_file.c:421
int spage_get_free_space_without_saving(THREAD_ENTRY *thread_p, PAGE_PTR page_p, bool *need_update)
Definition: slotted_page.c:944
#define OR_PUT_INT(ptr, val)
DISK_ISVALID vacuum_check_not_vacuumed_rec_header(THREAD_ENTRY *thread_p, OID *oid, OID *class_oid, MVCC_REC_HEADER *rec_header, int btree_node_type)
Definition: vacuum.c:7363
int db_get_string_size(const DB_VALUE *value)
SCAN_CODE heap_prev(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *next_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking)
Definition: heap_file.c:18553
DB_C_SHORT db_get_short(const DB_VALUE *value)
HEAP_CACHE_ATTRINFO * cache_attrinfo
Definition: xasl.h:290
int file_rv_tracker_mark_heap_deleted(THREAD_ENTRY *thread_p, LOG_RCV *rcv, bool is_undo)
#define HEAP_PERF_TRACK_LOGGING(thread_p, context)
Definition: heap_file.c:543
int(* data_readval)(OR_BUF *buf, DB_VALUE *value, struct tp_domain *domain, int size, bool copy, char *copy_buf, int copy_buf_len)
_Bigint *_Bigint * rv
Definition: mprec.c:113
unsigned page_was_unfixed
Definition: page_buffer.h:225
void er_clear(void)
void log_sysop_attach_to_outer(THREAD_ENTRY *thread_p)
Definition: log_manager.c:4156
#define REPR_HASH(class_oid)
Definition: heap_file.c:391
int chn
Definition: heap_file.c:434
HEAP_ATTRVALUE * values
Definition: heap_attrinfo.h:74
std::size_t thread_num_total_threads(void)
void log_sysop_commit(THREAD_ENTRY *thread_p)
Definition: log_manager.c:3979
int or_put_data(OR_BUF *buf, const char *data, int length)
SCAN_CODE spage_next_record(PAGE_PTR page_p, PGSLOTID *out_slot_id_p, RECDES *record_descriptor_p, int is_peeking)
#define OR_GET_MVCC_REPID_AND_FLAG(ptr)
STATIC_INLINE int heap_get_last_vpid(THREAD_ENTRY *thread_p, const HFID *hfid, VPID *last_vpid) __attribute__((ALWAYS_INLINE))
Definition: heap_file.c:3931
int REPR_ID
#define DB_VALUE_TYPE(value)
Definition: dbtype.h:70
void heap_flush(THREAD_ENTRY *thread_p, const OID *oid)
Definition: heap_file.c:5841
int i
Definition: dynamic_load.c:955
#define PGBUF_ORDERED_NULL_HFID
Definition: page_buffer.h:83
PGLENGTH offset
Definition: log_impl.h:1841
STATIC_INLINE int heap_copy_header_stats(THREAD_ENTRY *thread_p, PAGE_PTR page_header, HEAP_HDR_STATS *header_stats) __attribute__((ALWAYS_INLINE))
Definition: heap_file.c:3991
int mht_map(const MHT_TABLE *ht, int(*map_func)(const void *key, void *data, void *args), void *func_args)
Definition: memory_hash.c:1956
int db_make_null(DB_VALUE *value)
int spage_get_free_space(THREAD_ENTRY *thread_p, PAGE_PTR page_p)
Definition: slotted_page.c:917
#define OR_MAX_SHORT
HEAP_OPERATION_TYPE type
Definition: heap_file.h:251
DB_TYPE id
#define DB_IS_NULL(value)
Definition: dbtype.h:66
int heap_assign_address(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *oid, int expected_length)
Definition: heap_file.c:5786
#define OR_GET_OFFSET_SIZE(ptr)
#define NULL_ATTRID
Definition: heap_file.c:297
struct tp_domain * next
Definition: object_domain.h:76
int spage_slot_size(void)
Definition: slotted_page.c:846
bool heap_should_try_update_stat(const int current_freespace, const int prev_freespace)
Definition: heap_file.c:23719
OID class_oid
Definition: heap_file.h:175
INT16 type
int vacuum_rv_check_at_undo(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, INT16 slotid, INT16 rec_type)
Definition: vacuum.c:7777
#define HEAP_SET_RECORD(recdes, record_area_size, record_length, record_type, record_data)
Definition: heap_file.h:46
int heap_delete_hfid_from_cache(THREAD_ENTRY *thread_p, OID *class_oid)
Definition: heap_file.c:23143
HEAP_CLASSREPR_ENTRY * LRU_top
Definition: heap_file.c:340
#define NULL_VOLID
MVCC_SNAPSHOT_FUNC snapshot_fnc
Definition: mvcc.h:218
#define SP_ERROR
Definition: slotted_page.h:50
int lf_hash_delete(LF_TRAN_ENTRY *tran, LF_HASH_TABLE *table, void *key, int *success)
Definition: lock_free.c:2183
int debug_initpattern
Definition: heap_file.h:134
bool heap_is_big_length(int length)
Definition: heap_file.c:1248
#define IO_MAX_PAGE_SIZE
int heap_rv_undoredo_pagehdr(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15346
HEAP_SCANCACHE_NODE_LIST * next
Definition: heap_file.h:128
int heap_indexinfo_get_attrids(int btid_index, HEAP_CACHE_ATTRINFO *attrinfo, ATTR_ID *attrids)
Definition: heap_file.c:12880
#define MVCC_REC_HEADER_INITIALIZER
SCAN_CODE heap_header_next_scan(THREAD_ENTRY *thread_p, int cursor, DB_VALUE **out_values, int out_cnt, void *ptr)
Definition: heap_file.c:17626
int db_make_int(DB_VALUE *value, const int num)
int db_get_string_length(const DB_VALUE *value)
void heap_attrinfo_end(THREAD_ENTRY *thread_p, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:9871
bool pgbuf_has_any_waiters(PAGE_PTR pgptr)
short volid
Definition: dbtype_def.h:912
int heap_init_func_pred_unpack_info(THREAD_ENTRY *thread_p, HEAP_CACHE_ATTRINFO *attr_info, const OID *class_oid, FUNC_PRED_UNPACK_INFO **func_indx_preds)
Definition: heap_file.c:17337
int heap_rv_nop(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:23665
HEAP_SCANCACHE * scan_cache
Definition: heap_file.h:343
void thread_wakeup(cubthread::entry *thread_p, thread_resume_suspend_status resume_reason)
int heap_scancache_quick_start(HEAP_SCANCACHE *scan_cache)
Definition: heap_file.c:6913
int db_make_oid(DB_VALUE *value, const OID *oid)
float recs_sumlen
Definition: heap_file.c:191
#define VPID_GET_FROM_OID(vpid_ptr, oid_ptr)
Definition: page_buffer.h:44
#define OR_BOUND_BIT_FLAG
int heap_get_class_supers(THREAD_ENTRY *thread_p, const OID *class_oid, OID **super_oids, int *count)
Definition: heap_file.c:11003
struct heap_hdr_stats HEAP_HDR_STATS
Definition: heap_file.c:178
#define MVCC_SET_FLAG(header, flag)
Definition: mvcc.h:60
int oid_is_system_class(const OID *class_oid, bool *is_system_class_p)
Definition: oid.c:400
#define OID_ISNULL(oidp)
Definition: oid.h:82
#define SET_AUTO_INCREMENT_SERIAL_NAME(SR_NAME, CL_NAME, AT_NAME)
Definition: transform.h:176
LC_COPYAREA_MANYOBJS * mobjs
Definition: locator.h:249
enum mvcc_satisfies_snapshot_result MVCC_SATISFIES_SNAPSHOT_RESULT
Definition: mvcc.h:205
char * meta_data
Definition: dbtype_def.h:974
#define OR_PUT_BIGINT(ptr, val)
#define DONT_FREE
Definition: page_buffer.h:39
XML_ParsingStatus * status
Definition: expat.h:785
const OID oid_Null_oid
Definition: oid.c:69
#define HEAP_DEBUG_SCANCACHE_INITPATTERN
Definition: heap_file.c:92
FILE_OVF_HEAP_DES heap_overflow
Definition: file_manager.h:133
SCAN_CODE heap_next(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *next_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking)
Definition: heap_file.c:18504
int heap_indexinfo_get_num_attrs(int btid_index, HEAP_CACHE_ATTRINFO *attrinfo)
Definition: heap_file.c:12860
#define LANG_SYS_CODESET
#define TP_DOMAIN_CODESET(dom)
pthread_mutex_t LRU_mutex
Definition: heap_file.c:339
int collation_id
Definition: object_domain.h:94
union or_attribute::@25 auto_increment
INT16 spage_get_record_type(PAGE_PTR page_p, PGSLOTID slot_id)
int heap_header_capacity_end_scan(THREAD_ENTRY *thread_p, void **ptr)
Definition: heap_file.c:18027
#define STATIC_INLINE
Definition: porting.h:55
int heap_get_indexinfo_of_btid(THREAD_ENTRY *thread_p, const OID *class_oid, const BTID *btid, BTREE_TYPE *type, int *num_attrs, ATTR_ID **attr_ids, int **attrs_prefix_length, char **btnamepp, int *func_index_col_id)
Definition: heap_file.c:12984
#define pthread_mutex_lock(a)
Definition: heap_file.c:68
enum update_inplace_style UPDATE_INPLACE_STYLE
Definition: heap_file.h:240
#define LSA_INITIALIZER
char * oid_to_string(char *buf, int buf_size, OID *oid)
int heap_attrinfo_set(const OID *inst_oid, ATTR_ID attrid, DB_VALUE *attr_val, HEAP_CACHE_ATTRINFO *attr_info)
Definition: heap_file.c:11089
int heap_rv_undo_delete(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16002
PAGE_PTR pgptr
Definition: page_buffer.h:220
const void * data
Definition: log_impl.h:1809
OR_ATTRIBUTE * attributes
THREAD_ENTRY * next_wait_thrd
Definition: heap_file.c:306
void heap_clear_partition_info(THREAD_ENTRY *thread_p, OR_PARTITION *parts, int parts_count)
Definition: heap_file.c:10976
#define SSIZEOF(val)
Definition: system.h:259
int or_pad(OR_BUF *buf, int length)
#define ER_HEAP_CYCLE
Definition: error_code.h:105
#define pgbuf_ordered_unfix(thread_p, watcher_object)
Definition: page_buffer.h:271
char * vfid_to_string(char *buf, int buf_size, VFID *vfid)
type
#define MVCC_CLEAR_ALL_FLAG_BITS(rec_header_p)
Definition: mvcc.h:80
if( $result)
PGBUF_LATCH_MODE latch_mode
Definition: heap_file.h:353
#define LSA_LT(lsa_ptr1, lsa_ptr2)
#define HEAP_PERF_TRACK_EXECUTE(thread_p, context)
Definition: heap_file.c:522
int heap_rv_mvcc_redo_insert(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15534
int fetch_peek_dbval(THREAD_ENTRY *thread_p, REGU_VARIABLE *regu_var, VAL_DESCR *vd, OID *class_oid, OID *obj_oid, QFILE_TUPLE tpl, DB_VALUE **peek_dbval)
Definition: fetch.c:3853
int heap_rv_undoredo_update(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:16085
int overflow_get_length(THREAD_ENTRY *thread_p, const VPID *ovf_vpid)
bool heap_is_object_not_null(THREAD_ENTRY *thread_p, OID *class_oid, const OID *oid)
Definition: heap_file.c:8740
int heap_rv_undo_insert(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15628
int heap_get_class_name(THREAD_ENTRY *thread_p, const OID *class_oid, char **class_name)
Definition: heap_file.c:9220
#define db_private_realloc(thrd, ptr, size)
Definition: memory_alloc.h:232
#define PR_TYPE_FROM_ID(type)
int overflow_insert(THREAD_ENTRY *thread_p, const VFID *ovf_vfid, VPID *ovf_vpid, RECDES *recdes, FILE_TYPE file_type)
Definition: overflow_file.c:94
#define pgbuf_ordered_unfix_and_init(thread_p, page, pg_watcher)
Definition: page_buffer.h:67
double amount
Definition: dbtype_def.h:856
#define VPID_SET_NULL(vpid_ptr)
Definition: dbtype_def.h:931
int qdata_increment_dbval(DB_VALUE *dbval_p, DB_VALUE *result_p, int inc_val)
int xheap_reclaim_addresses(THREAD_ENTRY *thread_p, const HFID *hfid)
Definition: heap_file.c:5989
int heap_rv_undo_ovf_update(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:24670
#define HEAP_CHNGUESS_FUDGE_MININDICES
Definition: heap_file.c:406
#define PEEK()
Definition: regcomp.c:40
SCAN_CODE heap_get_visible_version_internal(THREAD_ENTRY *thread_p, HEAP_GET_CONTEXT *context, bool is_heap_scan)
Definition: heap_file.c:24158
struct heap_classrepr_lock HEAP_CLASSREPR_LOCK
Definition: heap_file.c:319
bool btree_is_unique_type(BTREE_TYPE type)
Definition: btree.c:5970
HEAP_CLASSREPR_FREE_LIST free_list
Definition: heap_file.c:361
void stx_free_additional_buff(THREAD_ENTRY *thread_p, void *xasl_unpack_info)
INT64 pageid
OR_ATTRIBUTE ** atts
char * buf
Definition: dbtype_def.h:891
#define PGBUF_PAGE_STATE_ARGS(pg)
Definition: page_buffer.h:55
HEAP_CLASSREPR_ENTRY * prev
Definition: heap_file.c:308
int heap_get_referenced_by(THREAD_ENTRY *thread_p, OID *class_oid, const OID *obj_oid, RECDES *recdes, int *max_oid_cnt, OID **oid_list)
Definition: heap_file.c:13153
int heap_compact_pages(THREAD_ENTRY *thread_p, OID *class_oid)
Definition: heap_file.c:16608
#define PGBUF_INIT_WATCHER(w, rank, hfid)
Definition: page_buffer.h:121
#define HFID_COPY(hfid_ptr1, hfid_ptr2)
#define ER_HEAP_BAD_OBJECT_TYPE
Definition: error_code.h:101
SCAN_CODE log_get_undo_record(THREAD_ENTRY *thread_p, LOG_PAGE *log_page_p, LOG_LSA process_lsa, RECDES *recdes)
#define VACUUM_LOG_ADD_DROPPED_FILE_UNDO
Definition: vacuum.h:77
#define VFID_SET_NULL(vfid_ptr)
Definition: file_manager.h:65
const char ** p
Definition: dynamic_load.c:946
const OID * oid_p
Definition: heap_file.h:339
#define OR_OFFSET_SIZE_1BYTE
void tp_domain_free(TP_DOMAIN *dom)
VPID * pgbuf_get_vpid_ptr(PAGE_PTR pgptr)
Definition: page_buffer.c:4544
PERF_UTIME_TRACKER * time_track
Definition: heap_file.h:290
UPDATE_INPLACE_STYLE update_in_place
Definition: heap_file.h:252
int or_advance(OR_BUF *buf, int offset)
DISK_ISVALID
Definition: disk_manager.h:53
int heap_insert_hfid_for_class_oid(THREAD_ENTRY *thread_p, const OID *class_oid, HFID *hfid, FILE_TYPE ftype)
Definition: heap_file.c:23275
#define OR_BUF_INIT2(buf, data, size)
#define HEAP_MAX_ALIGN
Definition: heap_file.h:58
Definition: heap_file.c:220
void heap_stats_update(THREAD_ENTRY *thread_p, PAGE_PTR pgptr, const HFID *hfid, int prev_freespace)
Definition: heap_file.c:2820
#define OR_MVCC_REPID_MASK
#define HEAP_IS_UPDATE_INPLACE(update_inplace_style)
Definition: heap_file.h:243
int spage_insert_at(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id, RECDES *record_descriptor_p)
int heap_set_autoincrement_value(THREAD_ENTRY *thread_p, HEAP_CACHE_ATTRINFO *attr_info, HEAP_SCANCACHE *scan_cache, int *is_set)
Definition: heap_file.c:16345
void heap_free_func_pred_unpack_info(THREAD_ENTRY *thread_p, int n_indexes, FUNC_PRED_UNPACK_INFO *func_indx_preds, int *attr_info_started)
Definition: heap_file.c:17465
int heap_rv_mvcc_redo_delete_newhome(THREAD_ENTRY *thread_p, LOG_RCV *rcv)
Definition: heap_file.c:15949
#define HEAP_STATS_NEXT_BEST_INDEX(i)
Definition: heap_file.c:173
#define HEAP_ISVALID_OID(thread_p, oid)
Definition: heap_file.h:71
#define ER_MVCC_NOT_SATISFIED_REEVALUATION
Definition: error_code.h:1476
HEAP_CLASSREPR_LRU_LIST LRU_list
Definition: heap_file.c:360
const VPID * overflow_delete(THREAD_ENTRY *thread_p, const VFID *ovf_vfid, const VPID *ovf_vpid)
unsigned int of_key
Definition: lock_free.h:76
SCAN_CODE overflow_get_nbytes(THREAD_ENTRY *thread_p, const VPID *ovf_vpid, RECDES *recdes, int start_offset, int max_nbytes, int *remaining_length, MVCC_SNAPSHOT *mvcc_snapshot)
int spage_insert_for_recovery(THREAD_ENTRY *thread_p, PAGE_PTR page_p, PGSLOTID slot_id, RECDES *record_descriptor_p)
int db_value_domain_init(DB_VALUE *value, const DB_TYPE type, const int precision, const int scale)
Definition: db_macro.c:149
int strcasecmp(const char *, const char *)
SCAN_CODE heap_prev_record_info(THREAD_ENTRY *thread_p, const HFID *hfid, OID *class_oid, OID *next_oid, RECDES *recdes, HEAP_SCANCACHE *scan_cache, int ispeeking, DB_VALUE **cache_recordinfo)
Definition: heap_file.c:18579
#define TP_DOMAIN_COLLATION_FLAG(dom)
SCAN_CODE heap_page_next(THREAD_ENTRY *thread_p, const OID *class_oid, const HFID *hfid, VPID *next_vpid, DB_VALUE **cache_pageinfo)
Definition: heap_file.c:18151
int file_map_pages(THREAD_ENTRY *thread_p, const VFID *vfid, PGBUF_LATCH_MODE latch_mode, PGBUF_LATCH_CONDITION latch_cond, FILE_MAP_PAGE_FUNC func, void *args)
LF_ENTRY_DESCRIPTOR hfid_hash_descriptor
Definition: heap_file.h:165
DB_C_CHAR db_get_string(const DB_VALUE *value)
SCAN_CODE heap_get_class_record(THREAD_ENTRY *thread_p, const OID *class_oid, RECDES *recdes_p, HEAP_SCANCACHE *scan_cache, int ispeeking)
Definition: heap_file.c:24638
const char int len
Definition: expat.h:713